/*
 * QEMU PowerPC XIVE interrupt controller model
 *
 * Copyright (c) 2017-2019, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "qapi/type-helpers.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/dma.h"
#include "sysemu/reset.h"
#include "monitor/monitor.h"
#include "hw/ppc/fdt.h"
#include "hw/ppc/pnv.h"
#include "hw/ppc/pnv_chip.h"
#include "hw/ppc/pnv_core.h"
#include "hw/ppc/pnv_xscom.h"
#include "hw/ppc/pnv_xive.h"
#include "hw/ppc/xive_regs.h"
#include "hw/qdev-properties.h"
#include "hw/ppc/ppc.h"
#include "trace.h"

#include <libfdt.h>

#include "pnv_xive_regs.h"

#undef XIVE_DEBUG

/*
 * Virtual structures table (VST)
 */
#define SBE_PER_BYTE   4

typedef struct XiveVstInfo {
    const char *name;
    uint32_t    size;
    uint32_t    max_blocks;
} XiveVstInfo;

static const XiveVstInfo vst_infos[] = {
    [VST_TSEL_IVT]  = { "EAT",  sizeof(XiveEAS), 16 },
    [VST_TSEL_SBE]  = { "SBE",  1,               16 },
    [VST_TSEL_EQDT] = { "ENDT", sizeof(XiveEND), 16 },
    [VST_TSEL_VPDT] = { "VPDT", sizeof(XiveNVT), 32 },

    /*
     * Interrupt FIFO backing store table (not modeled):
     *
     * 0 - IPI,
     * 1 - HWD,
     * 2 - First escalate,
     * 3 - Second escalate,
     * 4 - Redistribution,
     * 5 - IPI cascaded queue ?
     */
    [VST_TSEL_IRQ]  = { "IRQ",  1,               6  },
};

#define xive_error(xive, fmt, ...)                                      \
    qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n",              \
                  (xive)->chip->chip_id, ## __VA_ARGS__)

/*
 * When PC_TCTXT_CHIPID_OVERRIDE is configured, the PC_TCTXT_CHIPID
 * field overrides the hardwired chip ID in the Powerbus operations
 * and for CAM compares.
 */
static uint8_t pnv_xive_block_id(PnvXive *xive)
{
    uint8_t blk = xive->chip->chip_id;
    uint64_t cfg_val = xive->regs[PC_TCTXT_CFG >> 3];

    if (cfg_val & PC_TCTXT_CHIPID_OVERRIDE) {
        blk = GETFIELD(PC_TCTXT_CHIPID, cfg_val);
    }

    return blk;
}

/*
 * VST accessors for SBE, EAT, ENDT, NVT
 *
 * Indirect VST tables are arrays of VSDs pointing to a page (of the
 * same size). Each page is a direct VST table.
 */

#define XIVE_VSD_SIZE 8

/* Indirect page size can be 4K, 64K, 2M, 16M. */
static bool pnv_xive_vst_page_size_allowed(uint32_t page_shift)
{
    return page_shift == 12 || page_shift == 16 ||
        page_shift == 21 || page_shift == 24;
}
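
/*
 * Illustrative mapping of the VSD_TSIZE encoding, assuming
 * page_shift = VSD_TSIZE + 12 as used throughout this file:
 *
 *   VSD_TSIZE = 0  -> page_shift = 12 ->  4K  (allowed)
 *   VSD_TSIZE = 4  -> page_shift = 16 -> 64K  (allowed)
 *   VSD_TSIZE = 9  -> page_shift = 21 ->  2M  (allowed)
 *   VSD_TSIZE = 12 -> page_shift = 24 -> 16M  (allowed)
 *   VSD_TSIZE = 5  -> page_shift = 17 -> 128K (rejected)
 */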

static uint64_t pnv_xive_vst_addr_direct(PnvXive *xive, uint32_t type,
                                         uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);
    uint32_t idx_max;

    idx_max = vst_tsize / info->size - 1;
    if (idx > idx_max) {
#ifdef XIVE_DEBUG
        xive_error(xive, "VST: %s entry %x out of range [ 0 .. %x ] !?",
                   info->name, idx, idx_max);
#endif
        return 0;
    }

    return vst_addr + idx * info->size;
}
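
/*
 * Example of the direct case: a 64K ENDT (VSD_TSIZE = 4) holds
 * 64K / sizeof(XiveEND) = 2048 entries, so idx_max is 2047 and END
 * 100 lives at vst_addr + 100 * sizeof(XiveEND). Numbers are
 * illustrative only.
 */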

static uint64_t pnv_xive_vst_addr_indirect(PnvXive *xive, uint32_t type,
                                           uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t vsd_idx;
    uint32_t page_shift;
    uint32_t vst_per_page;

    /* Get the page size of the indirect table. */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    if (ldq_be_dma(&address_space_memory, vsd_addr, &vsd,
                   MEMTXATTRS_UNSPECIFIED)) {
        xive_error(xive, "VST: failed to access %s entry %x @0x%" PRIx64,
                   info->name, idx, vsd_addr);
        return 0;
    }

    if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE_DEBUG
        xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
#endif
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive_vst_page_size_allowed(page_shift)) {
        xive_error(xive, "VST: invalid %s page shift %d", info->name,
                   page_shift);
        return 0;
    }

    vst_per_page = (1ull << page_shift) / info->size;
    vsd_idx = idx / vst_per_page;

    /* Load the VSD we are looking for, if not already done */
    if (vsd_idx) {
        vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE;
        if (ldq_be_dma(&address_space_memory, vsd_addr, &vsd,
                       MEMTXATTRS_UNSPECIFIED)) {
            xive_error(xive, "VST: failed to access %s entry %x @0x%"
                       PRIx64, info->name, vsd_idx, vsd_addr);
            return 0;
        }

        if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE_DEBUG
            xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
#endif
            return 0;
        }

        /*
         * Check that the pages have a consistent size across the
         * indirect table
         */
        if (page_shift != GETFIELD(VSD_TSIZE, vsd) + 12) {
            xive_error(xive, "VST: %s entry %x indirect page sizes differ !?",
                       info->name, idx);
            return 0;
        }
    }

    return pnv_xive_vst_addr_direct(xive, type, vsd, (idx % vst_per_page));
}
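
/*
 * Illustrative walk-through of the indirect case, assuming 64K
 * subpages (page_shift = 16) and 32-byte END entries:
 *
 *   vst_per_page = 64K / 32 = 2048
 *   idx = 5000   -> vsd_idx = 5000 / 2048 = 2 (third VSD)
 *                -> subpage entry = 5000 % 2048 = 904
 *
 * The second VSD load fetches the descriptor of that subpage and
 * pnv_xive_vst_addr_direct() resolves the final RAM address.
 */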

/*
 * This is a simplified model of operation forwarding on a remote IC.
 *
 * A PC MMIO address is built to identify the NVT structure. The load
 * on the remote IC will return the address of the structure in RAM,
 * which will then be used by pnv_xive_vst_write/read to perform the
 * RAM operation.
 */
static uint64_t pnv_xive_vst_addr_remote(PnvXive *xive, uint32_t type,
                                         uint64_t vsd, uint8_t blk,
                                         uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t remote_addr = vsd & VSD_ADDRESS_MASK;
    uint64_t vst_addr;
    MemTxResult result;

    if (type != VST_TSEL_VPDT) {
        xive_error(xive, "VST: invalid access on remote VST %s %x/%x !?",
                   info->name, blk, idx);
        return 0;
    }

    remote_addr |= ((uint64_t)idx) << xive->pc_shift;

    vst_addr = address_space_ldq_be(&address_space_memory, remote_addr,
                                    MEMTXATTRS_UNSPECIFIED, &result);
    if (result != MEMTX_OK) {
        xive_error(xive, "VST: read failed at @0x%" HWADDR_PRIx
                   " for NVT %x/%x", remote_addr, blk, idx);
        return 0;
    }

    return vst_addr;
}

static uint64_t pnv_xive_vst_addr(PnvXive *xive, uint32_t type, uint8_t blk,
                                  uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd;

    if (blk >= info->max_blocks) {
        xive_error(xive, "VST: invalid block id %d for VST %s %d !?",
                   blk, info->name, idx);
        return 0;
    }

    vsd = xive->vsds[type][blk];

    /* Remote VST access */
    if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) {
        return pnv_xive_vst_addr_remote(xive, type, vsd, blk, idx);
    }

    if (VSD_INDIRECT & vsd) {
        return pnv_xive_vst_addr_indirect(xive, type, vsd, idx);
    }

    return pnv_xive_vst_addr_direct(xive, type, vsd, idx);
}

static int pnv_xive_vst_read(PnvXive *xive, uint32_t type, uint8_t blk,
                             uint32_t idx, void *data)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx);
    MemTxResult result;

    if (!addr) {
        return -1;
    }

    result = address_space_read(&address_space_memory, addr,
                                MEMTXATTRS_UNSPECIFIED, data,
                                info->size);
    if (result != MEMTX_OK) {
        xive_error(xive, "VST: read failed at @0x%" HWADDR_PRIx
                   " for VST %s %x/%x", addr, info->name, blk, idx);
        return -1;
    }
    return 0;
}

#define XIVE_VST_WORD_ALL -1

static int pnv_xive_vst_write(PnvXive *xive, uint32_t type, uint8_t blk,
                              uint32_t idx, void *data, uint32_t word_number)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx);
    MemTxResult result;

    if (!addr) {
        return -1;
    }

    if (word_number == XIVE_VST_WORD_ALL) {
        result = address_space_write(&address_space_memory, addr,
                                     MEMTXATTRS_UNSPECIFIED, data,
                                     info->size);
    } else {
        result = address_space_write(&address_space_memory,
                                     addr + word_number * 4,
                                     MEMTXATTRS_UNSPECIFIED,
                                     data + word_number * 4, 4);
    }

    if (result != MEMTX_OK) {
        xive_error(xive, "VST: write failed at @0x%" HWADDR_PRIx
                   " for VST %s %x/%x", addr, info->name, blk, idx);
        return -1;
    }
    return 0;
}
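
/*
 * Callers update either a full entry or a single 4-byte word of it.
 * A cache watch update of END word 1 only, for instance, would be:
 *
 *   pnv_xive_vst_write(xive, VST_TSEL_EQDT, blk, idx, &end, 1);
 *
 * which stores bytes [4..7] of the entry and leaves the rest of the
 * END untouched in RAM.
 */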

static int pnv_xive_get_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveEND *end)
{
    PnvXive *xive = PNV_XIVE(xrtr);

    if (pnv_xive_block_id(xive) != blk) {
        xive_error(xive, "VST: END %x/%x is remote !?", blk, idx);
        return -1;
    }

    return pnv_xive_vst_read(xive, VST_TSEL_EQDT, blk, idx, end);
}

static int pnv_xive_write_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                              XiveEND *end, uint8_t word_number)
{
    PnvXive *xive = PNV_XIVE(xrtr);

    if (pnv_xive_block_id(xive) != blk) {
        xive_error(xive, "VST: END %x/%x is remote !?", blk, idx);
        return -1;
    }

    return pnv_xive_vst_write(xive, VST_TSEL_EQDT, blk, idx, end,
                              word_number);
}

static int pnv_xive_end_update(PnvXive *xive)
{
    uint8_t  blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    int i;
    uint64_t eqc_watch[4];

    for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) {
        eqc_watch[i] = cpu_to_be64(xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i]);
    }

    return pnv_xive_vst_write(xive, VST_TSEL_EQDT, blk, idx, eqc_watch,
                              XIVE_VST_WORD_ALL);
}

static void pnv_xive_end_cache_load(PnvXive *xive)
{
    uint8_t  blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint64_t eqc_watch[4] = { 0 };
    int i;

    if (pnv_xive_vst_read(xive, VST_TSEL_EQDT, blk, idx, eqc_watch)) {
        xive_error(xive, "VST: no END entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) {
        xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(eqc_watch[i]);
    }
}

static int pnv_xive_get_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveNVT *nvt)
{
    return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt);
}

static int pnv_xive_write_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                              XiveNVT *nvt, uint8_t word_number)
{
    return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt,
                              word_number);
}

static int pnv_xive_nvt_update(PnvXive *xive)
{
    uint8_t  blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    int i;
    uint64_t vpc_watch[8];

    for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) {
        vpc_watch[i] = cpu_to_be64(xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i]);
    }

    return pnv_xive_vst_write(xive, VST_TSEL_VPDT, blk, idx, vpc_watch,
                              XIVE_VST_WORD_ALL);
}

static void pnv_xive_nvt_cache_load(PnvXive *xive)
{
    uint8_t  blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint64_t vpc_watch[8] = { 0 };
    int i;

    if (pnv_xive_vst_read(xive, VST_TSEL_VPDT, blk, idx, vpc_watch)) {
        xive_error(xive, "VST: no NVT entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) {
        xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(vpc_watch[i]);
    }
}

static int pnv_xive_get_eas(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveEAS *eas)
{
    PnvXive *xive = PNV_XIVE(xrtr);

    /*
     * EAT lookups should be local to the IC.
     */
    if (pnv_xive_block_id(xive) != blk) {
        xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    return pnv_xive_vst_read(xive, VST_TSEL_IVT, blk, idx, eas);
}

static int pnv_xive_get_pq(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                           uint8_t *pq)
{
    PnvXive *xive = PNV_XIVE(xrtr);

    if (pnv_xive_block_id(xive) != blk) {
        xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    *pq = xive_source_esb_get(&xive->ipi_source, idx);
    return 0;
}

static int pnv_xive_set_pq(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                           uint8_t *pq)
{
    PnvXive *xive = PNV_XIVE(xrtr);

    if (pnv_xive_block_id(xive) != blk) {
        xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    *pq = xive_source_esb_set(&xive->ipi_source, idx, *pq);
    return 0;
}

/*
 * One bit per thread id. The first register PC_THREAD_EN_REG0 covers
 * cores 0-15 (normal) of the chip or 0-7 (fused). The second register
 * covers cores 16-23 (normal) or 8-11 (fused).
 */
static bool pnv_xive_is_cpu_enabled(PnvXive *xive, PowerPCCPU *cpu)
{
    int pir = ppc_cpu_pir(cpu);
    uint32_t fc = PNV9_PIR2FUSEDCORE(pir);
    uint64_t reg = fc < 8 ? PC_THREAD_EN_REG0 : PC_THREAD_EN_REG1;
    uint32_t bit = pir & 0x3f;

    return xive->regs[reg >> 3] & PPC_BIT(bit);
}
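
/*
 * As an example, and assuming PNV9_PIR2FUSEDCORE() extracts
 * (pir >> 3) & 0xf, PIR 0x48 belongs to fused core 9 and is thus
 * controlled by PC_THREAD_EN_REG1, bit PPC_BIT(0x48 & 0x3f) =
 * PPC_BIT(8). Each register covers 64 thread ids.
 */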

static int pnv_xive_match_nvt(XivePresenter *xptr, uint8_t format,
                              uint8_t nvt_blk, uint32_t nvt_idx,
                              bool cam_ignore, uint8_t priority,
                              uint32_t logic_serv, XiveTCTXMatch *match)
{
    PnvXive *xive = PNV_XIVE(xptr);
    PnvChip *chip = xive->chip;
    int count = 0;
    int i, j;

    for (i = 0; i < chip->nr_cores; i++) {
        PnvCore *pc = chip->cores[i];
        CPUCore *cc = CPU_CORE(pc);

        for (j = 0; j < cc->nr_threads; j++) {
            PowerPCCPU *cpu = pc->threads[j];
            XiveTCTX *tctx;
            int ring;

            if (!pnv_xive_is_cpu_enabled(xive, cpu)) {
                continue;
            }

            tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

            /*
             * Check the thread context CAM lines and record matches.
             */
            ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk,
                                             nvt_idx, cam_ignore, logic_serv);
            /*
             * Save the context and follow on to catch duplicates,
             * which we don't support yet.
             */
            if (ring != -1) {
                if (match->tctx) {
                    qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a "
                                  "thread context NVT %x/%x\n",
                                  nvt_blk, nvt_idx);
                    return -1;
                }

                match->ring = ring;
                match->tctx = tctx;
                count++;
            }
        }
    }

    return count;
}

static uint32_t pnv_xive_presenter_get_config(XivePresenter *xptr)
{
    uint32_t cfg = 0;

    /* TIMA GEN1 is all P9 knows */
    cfg |= XIVE_PRESENTER_GEN1_TIMA_OS;

    return cfg;
}

static uint8_t pnv_xive_get_block_id(XiveRouter *xrtr)
{
    return pnv_xive_block_id(PNV_XIVE(xrtr));
}

/*
 * The TIMA MMIO space is shared among the chips. To identify the chip
 * from which the access is being done, we extract the chip id from
 * the PIR.
 */
static PnvXive *pnv_xive_tm_get_xive(PowerPCCPU *cpu)
{
    int pir = ppc_cpu_pir(cpu);
    XivePresenter *xptr = XIVE_TCTX(pnv_cpu_state(cpu)->intc)->xptr;
    PnvXive *xive = PNV_XIVE(xptr);

    if (!pnv_xive_is_cpu_enabled(xive, cpu)) {
        xive_error(xive, "IC: CPU %x is not enabled", pir);
    }
    return xive;
}

/*
 * The internal sources (IPIs) of the interrupt controller have no
 * knowledge of the XIVE chip on which they reside. Encode the block
 * id in the source interrupt number before forwarding the source
 * event notification to the Router. This is required on a multichip
 * system.
 */
static void pnv_xive_notify(XiveNotifier *xn, uint32_t srcno, bool pq_checked)
{
    PnvXive *xive = PNV_XIVE(xn);
    uint8_t blk = pnv_xive_block_id(xive);

    xive_router_notify(xn, XIVE_EAS(blk, srcno), pq_checked);
}
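
/*
 * XIVE_EAS() packs the block id in the top nibble of the 32-bit
 * interrupt number: with blk = 1, source number 0x10 is presented
 * to the router as EAS 0x10000010.
 */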

/*
 * XIVE helpers
 */

static uint64_t pnv_xive_vc_size(PnvXive *xive)
{
    return (~xive->regs[CQ_VC_BARM >> 3] + 1) & CQ_VC_BARM_MASK;
}
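
/*
 * The BARM registers hold the complement of the window size mask, so
 * the size falls out of two's complement arithmetic: for a naturally
 * aligned N-byte window, ~mask + 1 == N. A mask covering all bits
 * down to bit 28, for instance, yields 0x10000000, i.e. a 256M
 * region.
 */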

static uint64_t pnv_xive_edt_shift(PnvXive *xive)
{
    return ctz64(pnv_xive_vc_size(xive) / XIVE_TABLE_EDT_MAX);
}

static uint64_t pnv_xive_pc_size(PnvXive *xive)
{
    return (~xive->regs[CQ_PC_BARM >> 3] + 1) & CQ_PC_BARM_MASK;
}

static uint32_t pnv_xive_nr_ipis(PnvXive *xive, uint8_t blk)
{
    uint64_t vsd = xive->vsds[VST_TSEL_SBE][blk];
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);

    return VSD_INDIRECT & vsd ? 0 : vst_tsize * SBE_PER_BYTE;
}
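
/*
 * Each SBE byte backs 4 PQ pairs (SBE_PER_BYTE), so a direct 64K SBE
 * table provisions 64K * 4 = 256K interrupt numbers. Indirect SBE
 * tables are not sized by the model, hence the 0 return above.
 */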

/*
 * Compute the number of entries per indirect subpage.
 */
static uint64_t pnv_xive_vst_per_subpage(PnvXive *xive, uint32_t type)
{
    uint8_t blk = pnv_xive_block_id(xive);
    uint64_t vsd = xive->vsds[type][blk];
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t page_shift;

    /* For direct tables, fake a valid value */
    if (!(VSD_INDIRECT & vsd)) {
        return 1;
    }

    /* Get the page size of the indirect table. */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    if (ldq_be_dma(&address_space_memory, vsd_addr, &vsd,
                   MEMTXATTRS_UNSPECIFIED)) {
        xive_error(xive, "VST: failed to access %s entry @0x%" PRIx64,
                   info->name, vsd_addr);
        return 0;
    }

    if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE_DEBUG
        xive_error(xive, "VST: invalid %s entry @0x%" PRIx64 " !?",
                   info->name, vsd_addr);
#endif
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive_vst_page_size_allowed(page_shift)) {
        xive_error(xive, "VST: invalid %s page shift %d", info->name,
                   page_shift);
        return 0;
    }

    return (1ull << page_shift) / info->size;
}

/*
 * EDT Table
 *
 * The Virtualization Controller MMIO region containing the IPI ESB
 * pages and END ESB pages is sub-divided into "sets" which map
 * portions of the VC region to the different ESB pages. It is
 * configured at runtime through the EDT "Domain Table" to let the
 * firmware decide how to split the VC address space between IPI ESB
 * pages and END ESB pages.
 */

/*
 * Computes the overall size of the IPI or the END ESB pages
 */
static uint64_t pnv_xive_edt_size(PnvXive *xive, uint64_t type)
{
    uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive);
    uint64_t size = 0;
    int i;

    for (i = 0; i < XIVE_TABLE_EDT_MAX; i++) {
        uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]);

        if (edt_type == type) {
            size += edt_size;
        }
    }

    return size;
}

/*
 * Maps an offset of the VC region in the IPI or END region using the
 * layout defined by the EDT "Domain Table"
 */
static uint64_t pnv_xive_edt_offset(PnvXive *xive, uint64_t vc_offset,
                                    uint64_t type)
{
    int i;
    uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive);
    uint64_t edt_offset = vc_offset;

    for (i = 0; i < XIVE_TABLE_EDT_MAX && (i * edt_size) < vc_offset; i++) {
        uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]);

        if (edt_type != type) {
            edt_offset -= edt_size;
        }
    }

    return edt_offset;
}
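
/*
 * Worked example, assuming a 64M VC region, i.e. 64 sets of 1M
 * (edt_size = 1M): if sets 0-1 are IPI and set 2 is EQ, a VC offset
 * of 0x210000 falls in set 2 and is remapped into the END address
 * space at 0x210000 - 2 * 0x100000 = 0x10000.
 */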

static void pnv_xive_edt_resize(PnvXive *xive)
{
    uint64_t ipi_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_IPI);
    uint64_t end_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_EQ);

    memory_region_set_size(&xive->ipi_edt_mmio, ipi_edt_size);
    memory_region_add_subregion(&xive->ipi_mmio, 0, &xive->ipi_edt_mmio);

    memory_region_set_size(&xive->end_edt_mmio, end_edt_size);
    memory_region_add_subregion(&xive->end_mmio, 0, &xive->end_edt_mmio);
}

/*
 * XIVE Table configuration. Only EDT is supported.
 */
static int pnv_xive_table_set_data(PnvXive *xive, uint64_t val)
{
    uint64_t tsel = xive->regs[CQ_TAR >> 3] & CQ_TAR_TSEL;
    uint8_t tsel_index = GETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3]);
    uint64_t *xive_table;
    uint8_t max_index;

    switch (tsel) {
    case CQ_TAR_TSEL_BLK:
        max_index = ARRAY_SIZE(xive->blk);
        xive_table = xive->blk;
        break;
    case CQ_TAR_TSEL_MIG:
        max_index = ARRAY_SIZE(xive->mig);
        xive_table = xive->mig;
        break;
    case CQ_TAR_TSEL_EDT:
        max_index = ARRAY_SIZE(xive->edt);
        xive_table = xive->edt;
        break;
    case CQ_TAR_TSEL_VDT:
        max_index = ARRAY_SIZE(xive->vdt);
        xive_table = xive->vdt;
        break;
    default:
        xive_error(xive, "IC: invalid table %d", (int) tsel);
        return -1;
    }

    if (tsel_index >= max_index) {
        xive_error(xive, "IC: invalid index %d", (int) tsel_index);
        return -1;
    }

    xive_table[tsel_index] = val;

    if (xive->regs[CQ_TAR >> 3] & CQ_TAR_TBL_AUTOINC) {
        xive->regs[CQ_TAR >> 3] =
            SETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3], ++tsel_index);
    }

    /*
     * EDT configuration is complete. Resize the MMIO windows exposing
     * the IPI and the END ESBs in the VC region.
     */
    if (tsel == CQ_TAR_TSEL_EDT && tsel_index == ARRAY_SIZE(xive->edt)) {
        pnv_xive_edt_resize(xive);
    }

    return 0;
}

/*
 * Virtual Structure Tables (VST) configuration
 */
static void pnv_xive_vst_set_exclusive(PnvXive *xive, uint8_t type,
                                       uint8_t blk, uint64_t vsd)
{
    XiveENDSource *end_xsrc = &xive->end_source;
    XiveSource *xsrc = &xive->ipi_source;
    const XiveVstInfo *info = &vst_infos[type];
    uint32_t page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
    uint64_t vst_tsize = 1ull << page_shift;
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    /* Basic checks */

    if (VSD_INDIRECT & vsd) {
        if (!(xive->regs[VC_GLOBAL_CONFIG >> 3] & VC_GCONF_INDIRECT)) {
            xive_error(xive, "VST: %s indirect tables are not enabled",
                       info->name);
            return;
        }

        if (!pnv_xive_vst_page_size_allowed(page_shift)) {
            xive_error(xive, "VST: invalid %s page shift %d", info->name,
                       page_shift);
            return;
        }
    }

    if (!QEMU_IS_ALIGNED(vst_addr, 1ull << page_shift)) {
        xive_error(xive, "VST: %s table address 0x%"PRIx64" is not aligned with"
                   " page shift %d", info->name, vst_addr, page_shift);
        return;
    }

    /* Record the table configuration (in SRAM on HW) */
    xive->vsds[type][blk] = vsd;

    /* Now tune the models with the configuration provided by the FW */

    switch (type) {
    case VST_TSEL_IVT:  /* Nothing to be done */
        break;

    case VST_TSEL_EQDT:
        /*
         * Backing store pages for the END.
         *
         * If the table is direct, we can compute the number of PQ
         * entries provisioned by FW (such as skiboot) and resize the
         * END ESB window accordingly.
         */
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&end_xsrc->esb_mmio, (vst_tsize / info->size)
                                   * (1ull << xsrc->esb_shift));
        }
        memory_region_add_subregion(&xive->end_edt_mmio, 0,
                                    &end_xsrc->esb_mmio);
        break;

    case VST_TSEL_SBE:
        /*
         * Backing store pages for the source PQ bits. The model does
         * not use these PQ bits backed in RAM because the XiveSource
         * model has its own.
         *
         * If the table is direct, we can compute the number of PQ
         * entries provisioned by FW (such as skiboot) and resize the
         * ESB window accordingly.
         */
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&xsrc->esb_mmio, vst_tsize * SBE_PER_BYTE
                                   * (1ull << xsrc->esb_shift));
        }
        memory_region_add_subregion(&xive->ipi_edt_mmio, 0, &xsrc->esb_mmio);
        break;

    case VST_TSEL_VPDT: /* Not modeled */
    case VST_TSEL_IRQ:  /* Not modeled */
        /*
         * These tables contain the backing store pages for the
         * interrupt fifos of the VC sub-engine in case of overflow.
         */
        break;

    default:
        g_assert_not_reached();
    }
}

/*
 * Both PC and VC sub-engines are configured this way, since each uses
 * the Virtual Structure Tables: SBE, EAS, END and NVT.
 */
static void pnv_xive_vst_set_data(PnvXive *xive, uint64_t vsd, bool pc_engine)
{
    uint8_t mode = GETFIELD(VSD_MODE, vsd);
    uint8_t type = GETFIELD(VST_TABLE_SELECT,
                            xive->regs[VC_VSD_TABLE_ADDR >> 3]);
    uint8_t blk = GETFIELD(VST_TABLE_BLOCK,
                           xive->regs[VC_VSD_TABLE_ADDR >> 3]);
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    if (type > VST_TSEL_IRQ) {
        xive_error(xive, "VST: invalid table type %d", type);
        return;
    }

    if (blk >= vst_infos[type].max_blocks) {
        xive_error(xive, "VST: invalid block id %d for"
                   " %s table", blk, vst_infos[type].name);
        return;
    }

    /*
     * Only take the VC sub-engine configuration into account because
     * the XiveRouter model combines both VC and PC sub-engines
     */
    if (pc_engine) {
        return;
    }

    if (!vst_addr) {
        xive_error(xive, "VST: invalid %s table address", vst_infos[type].name);
        return;
    }

    switch (mode) {
    case VSD_MODE_FORWARD:
        xive->vsds[type][blk] = vsd;
        break;

    case VSD_MODE_EXCLUSIVE:
        pnv_xive_vst_set_exclusive(xive, type, blk, vsd);
        break;

    default:
        xive_error(xive, "VST: unsupported table mode %d", mode);
        return;
    }
}

/*
 * Interrupt controller MMIO region. The layout is compatible between
 * 4K and 64K pages:
 *
 * Page 0           sub-engine BARs
 *  0x000 - 0x3FF   IC registers
 *  0x400 - 0x7FF   PC registers
 *  0x800 - 0xFFF   VC registers
 *
 * Page 1           Notify page (writes only)
 *  0x000 - 0x7FF   HW interrupt triggers (PSI, PHB)
 *  0x800 - 0xFFF   forwards and syncs
 *
 * Page 2           LSI Trigger page (writes only) (not modeled)
 * Page 3           LSI SB EOI page (reads only) (not modeled)
 *
 * Page 4-7         indirect TIMA
 */
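
/*
 * With 4K pages (ic_shift = 12), the IC sub-regions are therefore
 * mapped at:
 *
 *   ic_base + 0x0000   register page
 *   ic_base + 0x1000   notify page
 *   ic_base + 0x2000   LSI pages
 *   ic_base + 0x4000   indirect TIMA pages
 *
 * matching the 0, 1, 2 and 4 << ic_shift offsets programmed by the
 * CQ_IC_BAR handler below.
 */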

/*
 * IC - registers MMIO
 */
static void pnv_xive_ic_reg_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    MemoryRegion *sysmem = get_system_memory();
    uint32_t reg = offset >> 3;
    bool is_chip0 = xive->chip->chip_id == 0;

    switch (offset) {

    /*
     * XIVE CQ (PowerBus bridge) settings
     */
    case CQ_MSGSND:     /* msgsnd for doorbells */
    case CQ_FIRMASK_OR: /* FIR error reporting */
        break;
    case CQ_PBI_CTL:
        if (val & CQ_PBI_PC_64K) {
            xive->pc_shift = 16;
        }
        if (val & CQ_PBI_VC_64K) {
            xive->vc_shift = 16;
        }
        break;
    case CQ_CFG_PB_GEN: /* PowerBus General Configuration */
        /*
         * TODO: CQ_INT_ADDR_OPT for 1-block-per-chip mode
         */
        break;

    /*
     * XIVE Virtualization Controller settings
     */
    case VC_GLOBAL_CONFIG:
        break;

    /*
     * XIVE Presenter Controller settings
     */
    case PC_GLOBAL_CONFIG:
        /*
         * PC_GCONF_CHIPID_OVR
         *   Overrides Int command Chip ID with the Chip ID field (DEBUG)
         */
        break;
    case PC_TCTXT_CFG:
        /*
         * TODO: block group support
         */
        break;
    case PC_TCTXT_TRACK:
        /*
         * PC_TCTXT_TRACK_EN:
         *   enable block tracking and exchange of block ownership
         *   information between Interrupt controllers
         */
        break;

    /*
     * Misc settings
     */
    case VC_SBC_CONFIG: /* Store EOI configuration */
        /*
         * Configure store EOI if required by firmware (skiboot has removed
         * support recently though)
         */
        if (val & (VC_SBC_CONF_CPLX_CIST | VC_SBC_CONF_CIST_BOTH)) {
            xive->ipi_source.esb_flags |= XIVE_SRC_STORE_EOI;
        }
        break;

    case VC_EQC_CONFIG: /* TODO: silent escalation */
    case VC_AIB_TX_ORDER_TAG2: /* relax ordering */
        break;

    /*
     * XIVE BAR settings (XSCOM only)
     */
    case CQ_RST_CTL:
        /* bit4: resets all BAR registers */
        break;

    case CQ_IC_BAR: /* IC BAR. 8 pages */
        xive->ic_shift = val & CQ_IC_BAR_64K ? 16 : 12;
        if (!(val & CQ_IC_BAR_VALID)) {
            xive->ic_base = 0;
            if (xive->regs[reg] & CQ_IC_BAR_VALID) {
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_reg_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_notify_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_lsi_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->tm_indirect_mmio);

                memory_region_del_subregion(sysmem, &xive->ic_mmio);
            }
        } else {
            xive->ic_base = val & ~(CQ_IC_BAR_VALID | CQ_IC_BAR_64K);
            if (!(xive->regs[reg] & CQ_IC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->ic_base,
                                            &xive->ic_mmio);

                memory_region_add_subregion(&xive->ic_mmio,  0,
                                            &xive->ic_reg_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            1ul << xive->ic_shift,
                                            &xive->ic_notify_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            2ul << xive->ic_shift,
                                            &xive->ic_lsi_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            4ull << xive->ic_shift,
                                            &xive->tm_indirect_mmio);
            }
        }
        break;

    case CQ_TM1_BAR: /* TM BAR. 4 pages. Map only once */
    case CQ_TM2_BAR: /* second TM BAR. for hotplug. Not modeled */
        xive->tm_shift = val & CQ_TM_BAR_64K ? 16 : 12;
        if (!(val & CQ_TM_BAR_VALID)) {
            xive->tm_base = 0;
            if (xive->regs[reg] & CQ_TM_BAR_VALID && is_chip0) {
                memory_region_del_subregion(sysmem, &xive->tm_mmio);
            }
        } else {
            xive->tm_base = val & ~(CQ_TM_BAR_VALID | CQ_TM_BAR_64K);
            if (!(xive->regs[reg] & CQ_TM_BAR_VALID) && is_chip0) {
                memory_region_add_subregion(sysmem, xive->tm_base,
                                            &xive->tm_mmio);
            }
        }
        break;

    case CQ_PC_BARM:
        xive->regs[reg] = val;
        memory_region_set_size(&xive->pc_mmio, pnv_xive_pc_size(xive));
        break;
    case CQ_PC_BAR: /* From 32M to 512G */
        if (!(val & CQ_PC_BAR_VALID)) {
            xive->pc_base = 0;
            if (xive->regs[reg] & CQ_PC_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->pc_mmio);
            }
        } else {
            xive->pc_base = val & ~(CQ_PC_BAR_VALID);
            if (!(xive->regs[reg] & CQ_PC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->pc_base,
                                            &xive->pc_mmio);
            }
        }
        break;

    case CQ_VC_BARM:
        xive->regs[reg] = val;
        memory_region_set_size(&xive->vc_mmio, pnv_xive_vc_size(xive));
        break;
    case CQ_VC_BAR: /* From 64M to 4TB */
        if (!(val & CQ_VC_BAR_VALID)) {
            xive->vc_base = 0;
            if (xive->regs[reg] & CQ_VC_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->vc_mmio);
            }
        } else {
            xive->vc_base = val & ~(CQ_VC_BAR_VALID);
            if (!(xive->regs[reg] & CQ_VC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->vc_base,
                                            &xive->vc_mmio);
            }
        }
        break;

    /*
     * XIVE Table settings.
     */
    case CQ_TAR: /* Table Address */
        break;
    case CQ_TDR: /* Table Data */
        pnv_xive_table_set_data(xive, val);
        break;

    /*
     * XIVE VC & PC Virtual Structure Table settings
     */
    case VC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_ADDR: /* Virtual table selector */
        break;
    case VC_VSD_TABLE_DATA: /* Virtual table setting */
    case PC_VSD_TABLE_DATA:
        pnv_xive_vst_set_data(xive, val, offset == PC_VSD_TABLE_DATA);
        break;

    /*
     * Interrupt fifo overflow in memory backing store (Not modeled)
     */
    case VC_IRQ_CONFIG_IPI:
    case VC_IRQ_CONFIG_HW:
    case VC_IRQ_CONFIG_CASCADE1:
    case VC_IRQ_CONFIG_CASCADE2:
    case VC_IRQ_CONFIG_REDIST:
    case VC_IRQ_CONFIG_IPI_CASC:
        break;

    /*
     * XIVE hardware thread enablement
     */
    case PC_THREAD_EN_REG0: /* Physical Thread Enable */
    case PC_THREAD_EN_REG1: /* Physical Thread Enable (fused core) */
        break;

    case PC_THREAD_EN_REG0_SET:
        xive->regs[PC_THREAD_EN_REG0 >> 3] |= val;
        break;
    case PC_THREAD_EN_REG1_SET:
        xive->regs[PC_THREAD_EN_REG1 >> 3] |= val;
        break;
    case PC_THREAD_EN_REG0_CLR:
        xive->regs[PC_THREAD_EN_REG0 >> 3] &= ~val;
        break;
    case PC_THREAD_EN_REG1_CLR:
        xive->regs[PC_THREAD_EN_REG1 >> 3] &= ~val;
        break;

    /*
     * Indirect TIMA access set up. Defines the PIR of the HW thread
     * to use.
     */
    case PC_TCTXT_INDIR0 ... PC_TCTXT_INDIR3:
        break;

    /*
     * XIVE PC & VC cache updates for EAS, NVT and END
     */
    case VC_IVC_SCRUB_MASK:
    case VC_IVC_SCRUB_TRIG:
        break;

    case VC_EQC_CWATCH_SPEC:
        val &= ~VC_EQC_CWATCH_CONFLICT; /* HW resets this bit */
        break;
    case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
        break;
    case VC_EQC_CWATCH_DAT0:
        /* writing to DATA0 triggers the cache write */
        xive->regs[reg] = val;
        pnv_xive_end_update(xive);
        break;
    case VC_EQC_SCRUB_MASK:
    case VC_EQC_SCRUB_TRIG:
        /*
         * The scrubbing registers flush the cache in RAM and can also
         * invalidate.
         */
        break;

    case PC_VPC_CWATCH_SPEC:
        val &= ~PC_VPC_CWATCH_CONFLICT; /* HW resets this bit */
        break;
    case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
        break;
    case PC_VPC_CWATCH_DAT0:
        /* writing to DATA0 triggers the cache write */
        xive->regs[reg] = val;
        pnv_xive_nvt_update(xive);
        break;
    case PC_VPC_SCRUB_MASK:
    case PC_VPC_SCRUB_TRIG:
        /*
         * The scrubbing registers flush the cache in RAM and can also
         * invalidate.
         */
        break;

    /*
     * XIVE PC & VC cache invalidation
     */
    case PC_AT_KILL:
        break;
    case VC_AT_MACRO_KILL:
        break;
    case PC_AT_KILL_MASK:
    case VC_AT_MACRO_KILL_MASK:
        break;

    default:
        xive_error(xive, "IC: invalid write to reg=0x%"HWADDR_PRIx, offset);
        return;
    }

    xive->regs[reg] = val;
}

static uint64_t pnv_xive_ic_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t val = 0;
    uint32_t reg = offset >> 3;

    switch (offset) {
    case CQ_CFG_PB_GEN:
    case CQ_IC_BAR:
    case CQ_TM1_BAR:
    case CQ_TM2_BAR:
    case CQ_PC_BAR:
    case CQ_PC_BARM:
    case CQ_VC_BAR:
    case CQ_VC_BARM:
    case CQ_TAR:
    case CQ_TDR:
    case CQ_PBI_CTL:

    case PC_TCTXT_CFG:
    case PC_TCTXT_TRACK:
    case PC_TCTXT_INDIR0:
    case PC_TCTXT_INDIR1:
    case PC_TCTXT_INDIR2:
    case PC_TCTXT_INDIR3:
    case PC_GLOBAL_CONFIG:

    case PC_VPC_SCRUB_MASK:

    case VC_GLOBAL_CONFIG:
    case VC_AIB_TX_ORDER_TAG2:

    case VC_IRQ_CONFIG_IPI:
    case VC_IRQ_CONFIG_HW:
    case VC_IRQ_CONFIG_CASCADE1:
    case VC_IRQ_CONFIG_CASCADE2:
    case VC_IRQ_CONFIG_REDIST:
    case VC_IRQ_CONFIG_IPI_CASC:

    case VC_EQC_SCRUB_MASK:
    case VC_IVC_SCRUB_MASK:
    case VC_SBC_CONFIG:
    case VC_AT_MACRO_KILL_MASK:
    case VC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_ADDR:
    case VC_VSD_TABLE_DATA:
    case PC_VSD_TABLE_DATA:
    case PC_THREAD_EN_REG0:
    case PC_THREAD_EN_REG1:
        val = xive->regs[reg];
        break;

    /*
     * XIVE hardware thread enablement
     */
    case PC_THREAD_EN_REG0_SET:
    case PC_THREAD_EN_REG0_CLR:
        val = xive->regs[PC_THREAD_EN_REG0 >> 3];
        break;
    case PC_THREAD_EN_REG1_SET:
    case PC_THREAD_EN_REG1_CLR:
        val = xive->regs[PC_THREAD_EN_REG1 >> 3];
        break;

    case CQ_MSGSND: /* Identifies which cores have msgsnd enabled. */
        val = 0xffffff0000000000;
        break;

    /*
     * XIVE PC & VC cache updates for EAS, NVT and END
     */
    case VC_EQC_CWATCH_SPEC:
        /* Reading the SPEC register frees the cache watch */
        xive->regs[reg] &= ~(VC_EQC_CWATCH_FULL | VC_EQC_CWATCH_CONFLICT);
        val = xive->regs[reg];
        break;
    case VC_EQC_CWATCH_DAT0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        pnv_xive_end_cache_load(xive);
        val = xive->regs[reg];
        break;
    case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
        val = xive->regs[reg];
        break;

    case PC_VPC_CWATCH_SPEC:
        /* Reading the SPEC register frees the cache watch */
        xive->regs[reg] &= ~(PC_VPC_CWATCH_FULL | PC_VPC_CWATCH_CONFLICT);
        val = xive->regs[reg];
        break;
    case PC_VPC_CWATCH_DAT0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        pnv_xive_nvt_cache_load(xive);
        val = xive->regs[reg];
        break;
    case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
        val = xive->regs[reg];
        break;

    case PC_VPC_SCRUB_TRIG:
    case VC_IVC_SCRUB_TRIG:
    case VC_EQC_SCRUB_TRIG:
        xive->regs[reg] &= ~VC_SCRUB_VALID;
        val = xive->regs[reg];
        break;

    /*
     * XIVE PC & VC cache invalidation
     */
    case PC_AT_KILL:
        xive->regs[reg] &= ~PC_AT_KILL_VALID;
        val = xive->regs[reg];
        break;
    case VC_AT_MACRO_KILL:
        xive->regs[reg] &= ~VC_KILL_VALID;
        val = xive->regs[reg];
        break;

    /*
     * XIVE synchronisation
     */
    case VC_EQC_CONFIG:
        val = VC_EQC_SYNC_MASK;
        break;

    default:
        xive_error(xive, "IC: invalid read reg=0x%"HWADDR_PRIx, offset);
    }

    return val;
}

static const MemoryRegionOps pnv_xive_ic_reg_ops = {
    .read = pnv_xive_ic_reg_read,
    .write = pnv_xive_ic_reg_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * IC - Notify MMIO port page (write only)
 */
#define PNV_XIVE_FORWARD_IPI        0x800 /* Forward IPI */
#define PNV_XIVE_FORWARD_HW         0x880 /* Forward HW */
#define PNV_XIVE_FORWARD_OS_ESC     0x900 /* Forward OS escalation */
#define PNV_XIVE_FORWARD_HW_ESC     0x980 /* Forward Hyp escalation */
#define PNV_XIVE_FORWARD_REDIS      0xa00 /* Forward Redistribution */
#define PNV_XIVE_RESERVED5          0xa80 /* Cache line 5 PowerBUS operation */
#define PNV_XIVE_RESERVED6          0xb00 /* Cache line 6 PowerBUS operation */
#define PNV_XIVE_RESERVED7          0xb80 /* Cache line 7 PowerBUS operation */

/* VC synchronisation */
#define PNV_XIVE_SYNC_IPI           0xc00 /* Sync IPI */
#define PNV_XIVE_SYNC_HW            0xc80 /* Sync HW */
#define PNV_XIVE_SYNC_OS_ESC        0xd00 /* Sync OS escalation */
#define PNV_XIVE_SYNC_HW_ESC        0xd80 /* Sync Hyp escalation */
#define PNV_XIVE_SYNC_REDIS         0xe00 /* Sync Redistribution */

/* PC synchronisation */
#define PNV_XIVE_SYNC_PULL          0xe80 /* Sync pull context */
#define PNV_XIVE_SYNC_PUSH          0xf00 /* Sync push context */
#define PNV_XIVE_SYNC_VPC           0xf80 /* Sync remove VPC store */

static void pnv_xive_end_notify(XiveRouter *xrtr, XiveEAS *eas)
{
    PnvXive *xive = PNV_XIVE(xrtr);
    uint8_t end_blk = xive_get_field64(EAS_END_BLOCK, eas->w);
    uint32_t end_idx = xive_get_field64(EAS_END_INDEX, eas->w);
    uint32_t end_data = xive_get_field64(EAS_END_DATA, eas->w);
    uint64_t end_vsd = xive->vsds[VST_TSEL_EQDT][end_blk];

    switch (GETFIELD(VSD_MODE, end_vsd)) {
    case VSD_MODE_EXCLUSIVE:
        /* Perform the END notification on the local IC. */
        xive_router_end_notify(xrtr, eas);
        break;

    case VSD_MODE_FORWARD: {
        MemTxResult result;
        uint64_t notif_port = end_vsd & VSD_ADDRESS_MASK;
        uint64_t data = XIVE_TRIGGER_END | XIVE_TRIGGER_PQ |
            be64_to_cpu(eas->w);

        /* Forward the store on the remote IC notify page. */
        address_space_stq_be(&address_space_memory, notif_port, data,
                             MEMTXATTRS_UNSPECIFIED, &result);
        if (result != MEMTX_OK) {
            xive_error(xive, "IC: Forward notif END %x/%x [%x] failed @%"
                       HWADDR_PRIx, end_blk, end_idx, end_data, notif_port);
            return;
        }
        break;
    }

    case VSD_MODE_INVALID:
    default:
        /* Set FIR */
        xive_error(xive, "IC: Invalid END VSD for block %x", end_blk);
        return;
    }
}

/*
 * The notify page can either be used to receive trigger events from
 * the HW controllers (PHB, PSI) or to reroute interrupts between
 * Interrupt controllers.
 */
static void pnv_xive_ic_hw_trigger(PnvXive *xive, hwaddr addr, uint64_t val)
{
    uint8_t blk;
    uint32_t idx;

    trace_pnv_xive_ic_hw_trigger(addr, val);

    if (val & XIVE_TRIGGER_END) {
        val = cpu_to_be64(val);
        pnv_xive_end_notify(XIVE_ROUTER(xive), (XiveEAS *) &val);
        return;
    }

    /*
     * Forward the source event notification directly to the Router.
     * The source interrupt number should already be correctly encoded
     * with the chip block id by the sending device (PHB, PSI).
     */
    blk = XIVE_EAS_BLOCK(val);
    idx = XIVE_EAS_INDEX(val);

    xive_router_notify(XIVE_NOTIFIER(xive), XIVE_EAS(blk, idx),
                       !!(val & XIVE_TRIGGER_PQ));
}
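
/*
 * The trigger data is a single doubleword. The XIVE_TRIGGER_END bit
 * selects between the two encodings handled above: an EAS-formatted
 * word routed to pnv_xive_end_notify(), or a source notification
 * whose low word carries XIVE_EAS(blk, idx) and whose
 * XIVE_TRIGGER_PQ bit tells the router whether the PQ bits were
 * already checked by the source controller.
 */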
1466 
1467 static void pnv_xive_ic_notify_write(void *opaque, hwaddr addr, uint64_t val,
1468                                      unsigned size)
1469 {
1470     PnvXive *xive = PNV_XIVE(opaque);
1471 
1472     /* VC: HW triggers */
1473     switch (addr) {
1474     case 0x000 ... 0x7FF:
1475         pnv_xive_ic_hw_trigger(opaque, addr, val);
1476         break;
1477 
1478     /* VC: Forwarded IRQs */
1479     case PNV_XIVE_FORWARD_IPI:
1480     case PNV_XIVE_FORWARD_HW:
1481     case PNV_XIVE_FORWARD_OS_ESC:
1482     case PNV_XIVE_FORWARD_HW_ESC:
1483     case PNV_XIVE_FORWARD_REDIS:
1484         /* TODO: forwarded IRQs. Should be like HW triggers */
1485         xive_error(xive, "IC: forwarded at @0x%"HWADDR_PRIx" IRQ 0x%"PRIx64,
1486                    addr, val);
1487         break;
1488 
1489     /* VC syncs */
1490     case PNV_XIVE_SYNC_IPI:
1491     case PNV_XIVE_SYNC_HW:
1492     case PNV_XIVE_SYNC_OS_ESC:
1493     case PNV_XIVE_SYNC_HW_ESC:
1494     case PNV_XIVE_SYNC_REDIS:
1495         break;
1496 
1497     /* PC syncs */
1498     case PNV_XIVE_SYNC_PULL:
1499     case PNV_XIVE_SYNC_PUSH:
1500     case PNV_XIVE_SYNC_VPC:
1501         break;
1502 
1503     default:
1504         xive_error(xive, "IC: invalid notify write @%"HWADDR_PRIx, addr);
1505     }
1506 }
1507 
1508 static uint64_t pnv_xive_ic_notify_read(void *opaque, hwaddr addr,
1509                                         unsigned size)
1510 {
1511     PnvXive *xive = PNV_XIVE(opaque);
1512 
1513     /* loads are invalid */
1514     xive_error(xive, "IC: invalid notify read @%"HWADDR_PRIx, addr);
1515     return -1;
1516 }
1517 
1518 static const MemoryRegionOps pnv_xive_ic_notify_ops = {
1519     .read = pnv_xive_ic_notify_read,
1520     .write = pnv_xive_ic_notify_write,
1521     .endianness = DEVICE_BIG_ENDIAN,
1522     .valid = {
1523         .min_access_size = 8,
1524         .max_access_size = 8,
1525     },
1526     .impl = {
1527         .min_access_size = 8,
1528         .max_access_size = 8,
1529     },
1530 };
1531 
1532 /*
1533  * IC - LSI MMIO handlers (not modeled)
1534  */
1535 
1536 static void pnv_xive_ic_lsi_write(void *opaque, hwaddr addr,
1537                               uint64_t val, unsigned size)
1538 {
1539     PnvXive *xive = PNV_XIVE(opaque);
1540 
1541     xive_error(xive, "IC: LSI invalid write @%"HWADDR_PRIx, addr);
1542 }
1543 
1544 static uint64_t pnv_xive_ic_lsi_read(void *opaque, hwaddr addr, unsigned size)
1545 {
1546     PnvXive *xive = PNV_XIVE(opaque);
1547 
1548     xive_error(xive, "IC: LSI invalid read @%"HWADDR_PRIx, addr);
1549     return -1;
1550 }
1551 
1552 static const MemoryRegionOps pnv_xive_ic_lsi_ops = {
1553     .read = pnv_xive_ic_lsi_read,
1554     .write = pnv_xive_ic_lsi_write,
1555     .endianness = DEVICE_BIG_ENDIAN,
1556     .valid = {
1557         .min_access_size = 8,
1558         .max_access_size = 8,
1559     },
1560     .impl = {
1561         .min_access_size = 8,
1562         .max_access_size = 8,
1563     },
1564 };
1565 
1566 /*
1567  * IC - Indirect TIMA MMIO handlers
1568  */
1569 
1570 /*
1571  * When the TIMA is accessed from the indirect page, the thread id of
1572  * the target CPU is configured in the PC_TCTXT_INDIR0 register before
1573  * use. This is used for resets and for debug purpose also.
1574  */
1575 static XiveTCTX *pnv_xive_get_indirect_tctx(PnvXive *xive)
1576 {
1577     PnvChip *chip = xive->chip;
1578     uint64_t tctxt_indir = xive->regs[PC_TCTXT_INDIR0 >> 3];
1579     PowerPCCPU *cpu = NULL;
1580     int pir;
1581 
1582     if (!(tctxt_indir & PC_TCTXT_INDIR_VALID)) {
1583         xive_error(xive, "IC: no indirect TIMA access in progress");
1584         return NULL;
1585     }
1586 
1587     pir = (chip->chip_id << 8) | GETFIELD(PC_TCTXT_INDIR_THRDID, tctxt_indir);
1588     cpu = pnv_chip_find_cpu(chip, pir);
1589     if (!cpu) {
1590         xive_error(xive, "IC: invalid PIR %x for indirect access", pir);
1591         return NULL;
1592     }
1593 
1594     /* Check that HW thread is XIVE enabled */
1595     if (!pnv_xive_is_cpu_enabled(xive, cpu)) {
1596         xive_error(xive, "IC: CPU %x is not enabled", pir);
1597     }
1598 
1599     return XIVE_TCTX(pnv_cpu_state(cpu)->intc);
1600 }
1601 
1602 static void xive_tm_indirect_write(void *opaque, hwaddr offset,
1603                                    uint64_t value, unsigned size)
1604 {
1605     XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));
1606 
1607     xive_tctx_tm_write(XIVE_PRESENTER(opaque), tctx, offset, value, size);
1608 }
1609 
1610 static uint64_t xive_tm_indirect_read(void *opaque, hwaddr offset,
1611                                       unsigned size)
1612 {
1613     XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));
1614 
1615     return xive_tctx_tm_read(XIVE_PRESENTER(opaque), tctx, offset, size);
1616 }
1617 
1618 static const MemoryRegionOps xive_tm_indirect_ops = {
1619     .read = xive_tm_indirect_read,
1620     .write = xive_tm_indirect_write,
1621     .endianness = DEVICE_BIG_ENDIAN,
1622     .valid = {
1623         .min_access_size = 1,
1624         .max_access_size = 8,
1625     },
1626     .impl = {
1627         .min_access_size = 1,
1628         .max_access_size = 8,
1629     },
1630 };
1631 
1632 static void pnv_xive_tm_write(void *opaque, hwaddr offset,
1633                               uint64_t value, unsigned size)
1634 {
1635     PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
1636     PnvXive *xive = pnv_xive_tm_get_xive(cpu);
1637     XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
1638 
1639     xive_tctx_tm_write(XIVE_PRESENTER(xive), tctx, offset, value, size);
1640 }
1641 
1642 static uint64_t pnv_xive_tm_read(void *opaque, hwaddr offset, unsigned size)
1643 {
1644     PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
1645     PnvXive *xive = pnv_xive_tm_get_xive(cpu);
1646     XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
1647 
1648     return xive_tctx_tm_read(XIVE_PRESENTER(xive), tctx, offset, size);
1649 }
1650 
1651 const MemoryRegionOps pnv_xive_tm_ops = {
1652     .read = pnv_xive_tm_read,
1653     .write = pnv_xive_tm_write,
1654     .endianness = DEVICE_BIG_ENDIAN,
1655     .valid = {
1656         .min_access_size = 1,
1657         .max_access_size = 8,
1658     },
1659     .impl = {
1660         .min_access_size = 1,
1661         .max_access_size = 8,
1662     },
1663 };
1664 
1665 /*
1666  * Interrupt controller XSCOM region.
 */
static uint64_t pnv_xive_xscom_read(void *opaque, hwaddr addr, unsigned size)
{
    switch (addr >> 3) {
    case X_VC_EQC_CONFIG:
        /* FIXME (skiboot): This is the only XSCOM load. Bizarre. */
        return VC_EQC_SYNC_MASK;
    default:
        return pnv_xive_ic_reg_read(opaque, addr, size);
    }
}

static void pnv_xive_xscom_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
    pnv_xive_ic_reg_write(opaque, addr, val, size);
}

static const MemoryRegionOps pnv_xive_xscom_ops = {
    .read = pnv_xive_xscom_read,
    .write = pnv_xive_xscom_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    }
};

/*
 * Virtualization Controller MMIO region containing the IPI and END ESB pages
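 *
 * The EDT table entry covering the access offset selects which of the
 * two internal address spaces, IPI or END, the operation is routed to.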
 */
static uint64_t pnv_xive_vc_read(void *opaque, hwaddr offset,
                                 unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
    uint64_t edt_type = 0;
    uint64_t edt_offset;
    MemTxResult result;
    AddressSpace *edt_as = NULL;
    uint64_t ret = -1;

    if (edt_index < XIVE_TABLE_EDT_MAX) {
        edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
    }

    switch (edt_type) {
    case CQ_TDR_EDT_IPI:
        edt_as = &xive->ipi_as;
        break;
    case CQ_TDR_EDT_EQ:
        edt_as = &xive->end_as;
        break;
    default:
        xive_error(xive, "VC: invalid EDT type for read @%"HWADDR_PRIx, offset);
        return -1;
    }

    /* Remap the offset for the targeted address space */
    edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);

    ret = address_space_ldq(edt_as, edt_offset, MEMTXATTRS_UNSPECIFIED,
                            &result);

    if (result != MEMTX_OK) {
        xive_error(xive, "VC: %s read failed at @0x%"HWADDR_PRIx " -> @0x%"
                   HWADDR_PRIx, edt_type == CQ_TDR_EDT_IPI ? "IPI" : "END",
                   offset, edt_offset);
        return -1;
    }

    return ret;
}

static void pnv_xive_vc_write(void *opaque, hwaddr offset,
                              uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
    uint64_t edt_type = 0;
    uint64_t edt_offset;
    MemTxResult result;
    AddressSpace *edt_as = NULL;

    if (edt_index < XIVE_TABLE_EDT_MAX) {
        edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
    }

    switch (edt_type) {
    case CQ_TDR_EDT_IPI:
        edt_as = &xive->ipi_as;
        break;
    case CQ_TDR_EDT_EQ:
        edt_as = &xive->end_as;
        break;
    default:
        xive_error(xive, "VC: invalid EDT type for write @%"HWADDR_PRIx,
                   offset);
        return;
    }

    /* Remap the offset for the targeted address space */
    edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);

    address_space_stq(edt_as, edt_offset, val, MEMTXATTRS_UNSPECIFIED, &result);
    if (result != MEMTX_OK) {
        xive_error(xive, "VC: write failed at @0x%"HWADDR_PRIx, edt_offset);
    }
}

static const MemoryRegionOps pnv_xive_vc_ops = {
    .read = pnv_xive_vc_read,
    .write = pnv_xive_vc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * Presenter Controller MMIO region. Points to the NVT sets.
 *
 * HW implements all possible mem ops to the underlying NVT structure
 * but QEMU does not need to be so precise. The model implementation
 * simply returns the RAM address of the NVT structure which is then
 * used by pnv_xive_vst_write/read to perform the RAM operation.
 */
static uint64_t pnv_xive_pc_read(void *opaque, hwaddr offset, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint32_t nvt_idx = offset >> xive->pc_shift;
    uint8_t blk = pnv_xive_block_id(xive); /* TODO: VDT -> block xlate */

    return pnv_xive_vst_addr(xive, VST_TSEL_VPDT, blk, nvt_idx);
}

static void pnv_xive_pc_write(void *opaque, hwaddr addr,
                              uint64_t value, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "PC: invalid write @%"HWADDR_PRIx, addr);
}

static const MemoryRegionOps pnv_xive_pc_ops = {
    .read = pnv_xive_pc_read,
    .write = pnv_xive_pc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

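/*
 * Print the END block/index and the pending interrupt bits (IPB) of a
 * valid NVT entry. Invalid entries are skipped.
 */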
static void xive_nvt_pic_print_info(XiveNVT *nvt, uint32_t nvt_idx,
                                    Monitor *mon)
{
    uint8_t  eq_blk = xive_get_field32(NVT_W1_EQ_BLOCK, nvt->w1);
    uint32_t eq_idx = xive_get_field32(NVT_W1_EQ_INDEX, nvt->w1);

    if (!xive_nvt_is_valid(nvt)) {
        return;
    }

    monitor_printf(mon, "  %08x end:%02x/%04x IPB:%02x\n", nvt_idx,
                   eq_blk, eq_idx,
                   xive_get_field32(NVT_W4_IPB, nvt->w4));
}

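/*
 * Dump the controller tables (sources, EAT, ENDT, NVTT) for the
 * monitor, e.g. under the 'info pic' command.
 */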
void pnv_xive_pic_print_info(PnvXive *xive, Monitor *mon)
{
    XiveRouter *xrtr = XIVE_ROUTER(xive);
    uint8_t blk = pnv_xive_block_id(xive);
    uint8_t chip_id = xive->chip->chip_id;
    uint32_t srcno0 = XIVE_EAS(blk, 0);
    uint32_t nr_ipis = pnv_xive_nr_ipis(xive, blk);
    XiveEAS eas;
    XiveEND end;
    XiveNVT nvt;
    int i;
    uint64_t xive_nvt_per_subpage;
    g_autoptr(GString) buf = g_string_new("");
    g_autoptr(HumanReadableText) info = NULL;

    g_string_append_printf(buf, "XIVE[%x] #%d Source %08x .. %08x\n",
                           chip_id, blk, srcno0, srcno0 + nr_ipis - 1);
    xive_source_pic_print_info(&xive->ipi_source, srcno0, buf);

    g_string_append_printf(buf, "XIVE[%x] #%d EAT %08x .. %08x\n",
                           chip_id, blk, srcno0, srcno0 + nr_ipis - 1);
    for (i = 0; i < nr_ipis; i++) {
        if (xive_router_get_eas(xrtr, blk, i, &eas)) {
            break;
        }
        if (!xive_eas_is_masked(&eas)) {
            xive_eas_pic_print_info(&eas, i, buf);
        }
    }

    g_string_append_printf(buf, "XIVE[%x] #%d ENDT\n", chip_id, blk);
    i = 0;
    while (!xive_router_get_end(xrtr, blk, i, &end)) {
        xive_end_pic_print_info(&end, i++, buf);
    }

    g_string_append_printf(buf, "XIVE[%x] #%d END Escalation EAT\n",
                           chip_id, blk);
    i = 0;
    while (!xive_router_get_end(xrtr, blk, i, &end)) {
        xive_end_eas_pic_print_info(&end, i++, buf);
    }
    info = human_readable_text_from_str(buf);
    monitor_puts(mon, info->human_readable_text);

    monitor_printf(mon, "XIVE[%x] #%d NVTT %08x .. %08x\n", chip_id, blk,
                   0, XIVE_NVT_COUNT - 1);
    xive_nvt_per_subpage = pnv_xive_vst_per_subpage(xive, VST_TSEL_VPDT);
    for (i = 0; i < XIVE_NVT_COUNT; i += xive_nvt_per_subpage) {
        while (!xive_router_get_nvt(xrtr, blk, i, &nvt)) {
            xive_nvt_pic_print_info(&nvt, i++, mon);
        }
    }
}

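/*
 * Restore the default 4K page shifts and unmap the ESB subregions
 * that FW may have configured at runtime.
 */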
static void pnv_xive_reset(void *dev)
{
    PnvXive *xive = PNV_XIVE(dev);
    XiveSource *xsrc = &xive->ipi_source;
    XiveENDSource *end_xsrc = &xive->end_source;

    /* Default page size (Should be changed at runtime to 64k) */
    xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;

    /* Clear subregions */
    if (memory_region_is_mapped(&xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->ipi_edt_mmio, &xsrc->esb_mmio);
    }

    if (memory_region_is_mapped(&xive->ipi_edt_mmio)) {
        memory_region_del_subregion(&xive->ipi_mmio, &xive->ipi_edt_mmio);
    }

    if (memory_region_is_mapped(&end_xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->end_edt_mmio, &end_xsrc->esb_mmio);
    }

    if (memory_region_is_mapped(&xive->end_edt_mmio)) {
        memory_region_del_subregion(&xive->end_mmio, &xive->end_edt_mmio);
    }
}

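/* The IPI and END source children are realized in pnv_xive_realize() */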
static void pnv_xive_init(Object *obj)
{
    PnvXive *xive = PNV_XIVE(obj);

    object_initialize_child(obj, "ipi_source", &xive->ipi_source,
                            TYPE_XIVE_SOURCE);
    object_initialize_child(obj, "end_source", &xive->end_source,
                            TYPE_XIVE_END_SOURCE);
}

/*
 * Maximum number of IRQs and ENDs supported by HW
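 *
 * Each source is backed by a pair of 64K ESB pages in the VC BAR
 * space (XIVE_ESB_64K_2PAGE), which bounds both counts.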
 */
#define PNV_XIVE_NR_IRQS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
#define PNV_XIVE_NR_ENDS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))

static void pnv_xive_realize(DeviceState *dev, Error **errp)
{
    PnvXive *xive = PNV_XIVE(dev);
    PnvXiveClass *pxc = PNV_XIVE_GET_CLASS(dev);
    XiveSource *xsrc = &xive->ipi_source;
    XiveENDSource *end_xsrc = &xive->end_source;
    Error *local_err = NULL;

    pxc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    assert(xive->chip);

    /*
     * The XiveSource and XiveENDSource objects are realized with the
     * maximum allowed HW configuration. The ESB MMIO regions will be
     * resized dynamically when the controller is configured by the FW
     * to limit accesses to resources not provisioned.
     */
    object_property_set_int(OBJECT(xsrc), "nr-irqs", PNV_XIVE_NR_IRQS,
                            &error_fatal);
    object_property_set_link(OBJECT(xsrc), "xive", OBJECT(xive), &error_abort);
    if (!qdev_realize(DEVICE(xsrc), NULL, errp)) {
        return;
    }

    object_property_set_int(OBJECT(end_xsrc), "nr-ends", PNV_XIVE_NR_ENDS,
                            &error_fatal);
    object_property_set_link(OBJECT(end_xsrc), "xive", OBJECT(xive),
                             &error_abort);
    if (!qdev_realize(DEVICE(end_xsrc), NULL, errp)) {
        return;
    }

    /* Default page size. Generally changed at runtime to 64k */
    xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;

    /* XSCOM region, used for initial configuration of the BARs */
    memory_region_init_io(&xive->xscom_regs, OBJECT(dev), &pnv_xive_xscom_ops,
                          xive, "xscom-xive", PNV9_XSCOM_XIVE_SIZE << 3);

    /* Interrupt controller MMIO regions */
    memory_region_init(&xive->ic_mmio, OBJECT(dev), "xive-ic",
                       PNV9_XIVE_IC_SIZE);

    memory_region_init_io(&xive->ic_reg_mmio, OBJECT(dev), &pnv_xive_ic_reg_ops,
                          xive, "xive-ic-reg", 1 << xive->ic_shift);
    memory_region_init_io(&xive->ic_notify_mmio, OBJECT(dev),
                          &pnv_xive_ic_notify_ops,
                          xive, "xive-ic-notify", 1 << xive->ic_shift);
    xive->ic_notify_mmio.disable_reentrancy_guard = true;

    /* The Pervasive LSI trigger and EOI pages (not modeled) */
    memory_region_init_io(&xive->ic_lsi_mmio, OBJECT(dev), &pnv_xive_ic_lsi_ops,
                          xive, "xive-ic-lsi", 2 << xive->ic_shift);

    /* Thread Interrupt Management Area (Indirect) */
    memory_region_init_io(&xive->tm_indirect_mmio, OBJECT(dev),
                          &xive_tm_indirect_ops,
                          xive, "xive-tima-indirect", PNV9_XIVE_TM_SIZE);
    /*
     * Overall Virtualization Controller MMIO region containing the
     * IPI ESB pages and END ESB pages. The layout is defined by the
     * EDT "Domain table" and the accesses are dispatched using
     * address spaces for each.
     */
    memory_region_init_io(&xive->vc_mmio, OBJECT(xive), &pnv_xive_vc_ops, xive,
                          "xive-vc", PNV9_XIVE_VC_SIZE);

    memory_region_init(&xive->ipi_mmio, OBJECT(xive), "xive-vc-ipi",
                       PNV9_XIVE_VC_SIZE);
    address_space_init(&xive->ipi_as, &xive->ipi_mmio, "xive-vc-ipi");
    memory_region_init(&xive->end_mmio, OBJECT(xive), "xive-vc-end",
                       PNV9_XIVE_VC_SIZE);
    address_space_init(&xive->end_as, &xive->end_mmio, "xive-vc-end");

    /*
     * The MMIO windows exposing the IPI ESBs and the END ESBs in the
     * VC region. Their size is configured by the FW in the EDT table.
     */
    memory_region_init(&xive->ipi_edt_mmio, OBJECT(xive), "xive-vc-ipi-edt", 0);
    memory_region_init(&xive->end_edt_mmio, OBJECT(xive), "xive-vc-end-edt", 0);

    /* Presenter Controller MMIO region (not modeled) */
    memory_region_init_io(&xive->pc_mmio, OBJECT(xive), &pnv_xive_pc_ops, xive,
                          "xive-pc", PNV9_XIVE_PC_SIZE);
    xive->pc_mmio.disable_reentrancy_guard = true;

    /* Thread Interrupt Management Area (Direct) */
    memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &pnv_xive_tm_ops,
                          xive, "xive-tima", PNV9_XIVE_TM_SIZE);

    qemu_register_reset(pnv_xive_reset, dev);
}

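/* Add the XIVE node to the device tree under the chip's xscom node */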
static int pnv_xive_dt_xscom(PnvXScomInterface *dev, void *fdt,
                             int xscom_offset)
{
    const char compat[] = "ibm,power9-xive-x";
    char *name;
    int offset;
    uint32_t lpc_pcba = PNV9_XSCOM_XIVE_BASE;
    uint32_t reg[] = {
        cpu_to_be32(lpc_pcba),
        cpu_to_be32(PNV9_XSCOM_XIVE_SIZE)
    };

    name = g_strdup_printf("xive@%x", lpc_pcba);
    offset = fdt_add_subnode(fdt, xscom_offset, name);
    _FDT(offset);
    g_free(name);

    _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg))));
    _FDT((fdt_setprop(fdt, offset, "compatible", compat,
                      sizeof(compat))));
    return 0;
}

static Property pnv_xive_properties[] = {
    DEFINE_PROP_UINT64("ic-bar", PnvXive, ic_base, 0),
    DEFINE_PROP_UINT64("vc-bar", PnvXive, vc_base, 0),
    DEFINE_PROP_UINT64("pc-bar", PnvXive, pc_base, 0),
    DEFINE_PROP_UINT64("tm-bar", PnvXive, tm_base, 0),
    /* The PnvChip id identifies the XIVE interrupt controller. */
    DEFINE_PROP_LINK("chip", PnvXive, chip, TYPE_PNV_CHIP, PnvChip *),
    DEFINE_PROP_END_OF_LIST(),
};

static void pnv_xive_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass);
    XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass);
    XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);
    XivePresenterClass *xpc = XIVE_PRESENTER_CLASS(klass);
    PnvXiveClass *pxc = PNV_XIVE_CLASS(klass);

    xdc->dt_xscom = pnv_xive_dt_xscom;

    dc->desc = "PowerNV XIVE Interrupt Controller";
    device_class_set_parent_realize(dc, pnv_xive_realize, &pxc->parent_realize);
    device_class_set_props(dc, pnv_xive_properties);

    xrc->get_eas = pnv_xive_get_eas;
    xrc->get_pq = pnv_xive_get_pq;
    xrc->set_pq = pnv_xive_set_pq;
    xrc->get_end = pnv_xive_get_end;
    xrc->write_end = pnv_xive_write_end;
    xrc->get_nvt = pnv_xive_get_nvt;
    xrc->write_nvt = pnv_xive_write_nvt;
    xrc->get_block_id = pnv_xive_get_block_id;
    xrc->end_notify = pnv_xive_end_notify;

    xnc->notify = pnv_xive_notify;
    xpc->match_nvt  = pnv_xive_match_nvt;
    xpc->get_config = pnv_xive_presenter_get_config;
}

static const TypeInfo pnv_xive_info = {
    .name          = TYPE_PNV_XIVE,
    .parent        = TYPE_XIVE_ROUTER,
    .instance_init = pnv_xive_init,
    .instance_size = sizeof(PnvXive),
    .class_init    = pnv_xive_class_init,
    .class_size    = sizeof(PnvXiveClass),
    .interfaces    = (InterfaceInfo[]) {
        { TYPE_PNV_XSCOM_INTERFACE },
        { }
    }
};

static void pnv_xive_register_types(void)
{
    type_register_static(&pnv_xive_info);
}

type_init(pnv_xive_register_types)