/*
 * QEMU PowerPC XIVE2 interrupt controller model  (POWER10)
 *
 * Copyright (c) 2019-2022, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "qapi/type-helpers.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/dma.h"
#include "monitor/monitor.h"
#include "hw/ppc/fdt.h"
#include "hw/ppc/pnv.h"
#include "hw/ppc/pnv_chip.h"
#include "hw/ppc/pnv_core.h"
#include "hw/ppc/pnv_xscom.h"
#include "hw/ppc/xive2.h"
#include "hw/ppc/pnv_xive.h"
#include "hw/ppc/xive_regs.h"
#include "hw/ppc/xive2_regs.h"
#include "hw/ppc/ppc.h"
#include "hw/qdev-properties.h"
#include "sysemu/reset.h"

#include <libfdt.h>

#include "pnv_xive2_regs.h"

#undef XIVE2_DEBUG

/*
 * Virtual structures table (VST)
 */
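/* Each source takes 2 bits of state (P and Q), i.e. 4 sources per byte */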
#define SBE_PER_BYTE   4

typedef struct XiveVstInfo {
    const char *name;
    uint32_t    size;
    uint32_t    max_blocks;
} XiveVstInfo;

static const XiveVstInfo vst_infos[] = {

    [VST_EAS]  = { "EAT",  sizeof(Xive2Eas),  16 },
    [VST_ESB]  = { "ESB",  1,                  16 },
    [VST_END]  = { "ENDT", sizeof(Xive2End),  16 },

    [VST_NVP]  = { "NVPT", sizeof(Xive2Nvp),  16 },
    [VST_NVG]  = { "NVGT", sizeof(Xive2Nvgc), 16 },
    [VST_NVC]  = { "NVCT", sizeof(Xive2Nvgc), 16 },

    [VST_IC]   = { "IC",   1 /* ? */,          16 }, /* Topology # */
    [VST_SYNC] = { "SYNC", 1 /* ? */,          16 }, /* Topology # */

    /*
     * This table contains the backing store pages for the interrupt
     * fifos of the VC sub-engine in case of overflow.
     *
     * 0 - IPI,
     * 1 - HWD,
     * 2 - NxC,
     * 3 - INT,
     * 4 - OS-Queue,
     * 5 - Pool-Queue,
     * 6 - Hard-Queue
     */
    [VST_ERQ]  = { "ERQ",  1,                   VC_QUEUE_COUNT },
};

#define xive2_error(xive, fmt, ...)                                      \
    qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n",              \
                  (xive)->chip->chip_id, ## __VA_ARGS__)

/*
 * TODO: Document block id override
 */
static uint32_t pnv_xive2_block_id(PnvXive2 *xive)
{
    uint8_t blk = xive->chip->chip_id;
    uint64_t cfg_val = xive->cq_regs[CQ_XIVE_CFG >> 3];

    if (cfg_val & CQ_XIVE_CFG_HYP_HARD_BLKID_OVERRIDE) {
        blk = GETFIELD(CQ_XIVE_CFG_HYP_HARD_BLOCK_ID, cfg_val);
    }

    return blk;
}

/*
 * Remote access to controllers. HW uses MMIOs. For now, a simple scan
 * of the chips is good enough.
 *
 * TODO: Block scope support
 */
static PnvXive2 *pnv_xive2_get_remote(uint8_t blk)
{
    PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine());
    int i;

    for (i = 0; i < pnv->num_chips; i++) {
        Pnv10Chip *chip10 = PNV10_CHIP(pnv->chips[i]);
        PnvXive2 *xive = &chip10->xive;

        if (pnv_xive2_block_id(xive) == blk) {
            return xive;
        }
    }
    return NULL;
}

/*
 * VST accessors for ESB, EAT, ENDT, NVP
 *
 * Indirect VST tables are arrays of VSDs pointing to pages (of the
 * same size). Each page is a direct VST table.
 */

#define XIVE_VSD_SIZE 8
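
/*
 * Example: with 64K indirect pages and 32-byte END entries, one page
 * holds 2048 ENDs, so END index 5000 is reached through VSD #2 of the
 * indirect table, at offset 904 * 32 within the page it points to.
 */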

/* Indirect page size can be 4K, 64K, 2M, 16M. */
static uint64_t pnv_xive2_vst_page_size_allowed(uint32_t page_shift)
{
    return page_shift == 12 || page_shift == 16 ||
           page_shift == 21 || page_shift == 24;
}

static uint64_t pnv_xive2_vst_addr_direct(PnvXive2 *xive, uint32_t type,
                                          uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);
    uint32_t idx_max;

    idx_max = vst_tsize / info->size - 1;
    if (idx > idx_max) {
#ifdef XIVE2_DEBUG
        xive2_error(xive, "VST: %s entry %x out of range [ 0 .. %x ] !?",
                    info->name, idx, idx_max);
#endif
        return 0;
    }

    return vst_addr + idx * info->size;
}

static uint64_t pnv_xive2_vst_addr_indirect(PnvXive2 *xive, uint32_t type,
                                            uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t vsd_idx;
    uint32_t page_shift;
    uint32_t vst_per_page;

    /* Get the page size of the indirect table. */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    ldq_be_dma(&address_space_memory, vsd_addr, &vsd, MEMTXATTRS_UNSPECIFIED);

    if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE2_DEBUG
        xive2_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
#endif
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive2_vst_page_size_allowed(page_shift)) {
        xive2_error(xive, "VST: invalid %s page shift %d", info->name,
                    page_shift);
        return 0;
    }

    vst_per_page = (1ull << page_shift) / info->size;
    vsd_idx = idx / vst_per_page;

    /* Load the VSD we are looking for, if not already done */
    if (vsd_idx) {
        vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE;
        ldq_be_dma(&address_space_memory, vsd_addr, &vsd,
                   MEMTXATTRS_UNSPECIFIED);

        if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE2_DEBUG
            xive2_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
#endif
            return 0;
        }

        /*
         * Check that the pages have a consistent size across the
         * indirect table
         */
        if (page_shift != GETFIELD(VSD_TSIZE, vsd) + 12) {
            xive2_error(xive, "VST: %s entry %x indirect page size differs !?",
                        info->name, idx);
            return 0;
        }
    }

    return pnv_xive2_vst_addr_direct(xive, type, vsd, (idx % vst_per_page));
}

static uint64_t pnv_xive2_vst_addr(PnvXive2 *xive, uint32_t type, uint8_t blk,
                                   uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd;

    if (blk >= info->max_blocks) {
        xive2_error(xive, "VST: invalid block id %d for VST %s %d !?",
                    blk, info->name, idx);
        return 0;
    }

    vsd = xive->vsds[type][blk];

    /* Remote VST access */
    if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) {
        xive = pnv_xive2_get_remote(blk);

        return xive ? pnv_xive2_vst_addr(xive, type, blk, idx) : 0;
    }

    if (VSD_INDIRECT & vsd) {
        return pnv_xive2_vst_addr_indirect(xive, type, vsd, idx);
    }

    return pnv_xive2_vst_addr_direct(xive, type, vsd, idx);
}

static int pnv_xive2_vst_read(PnvXive2 *xive, uint32_t type, uint8_t blk,
                              uint32_t idx, void *data)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive2_vst_addr(xive, type, blk, idx);
    MemTxResult result;

    if (!addr) {
        return -1;
    }

    result = address_space_read(&address_space_memory, addr,
                                MEMTXATTRS_UNSPECIFIED, data,
                                info->size);
    if (result != MEMTX_OK) {
        xive2_error(xive, "VST: read failed at @0x%" HWADDR_PRIx
                    " for VST %s %x/%x\n", addr, info->name, blk, idx);
        return -1;
    }
    return 0;
}

#define XIVE_VST_WORD_ALL -1

static int pnv_xive2_vst_write(PnvXive2 *xive, uint32_t type, uint8_t blk,
                               uint32_t idx, void *data, uint32_t word_number)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive2_vst_addr(xive, type, blk, idx);
    MemTxResult result;

    if (!addr) {
        return -1;
    }

    if (word_number == XIVE_VST_WORD_ALL) {
        result = address_space_write(&address_space_memory, addr,
                                     MEMTXATTRS_UNSPECIFIED, data,
                                     info->size);
    } else {
        result = address_space_write(&address_space_memory,
                                     addr + word_number * 4,
                                     MEMTXATTRS_UNSPECIFIED,
                                     data + word_number * 4, 4);
    }

    if (result != MEMTX_OK) {
        xive2_error(xive, "VST: write failed at @0x%" HWADDR_PRIx
                    " for VST %s %x/%x\n", addr, info->name, blk, idx);
        return -1;
    }
    return 0;
}

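/*
 * The PQ state bits of the internal sources are handled by the
 * XiveSource model: XIVE_ESB_RESET (0b00), XIVE_ESB_OFF (0b01),
 * XIVE_ESB_PENDING (0b10) and XIVE_ESB_QUEUED (0b11).
 */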
static int pnv_xive2_get_pq(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                            uint8_t *pq)
{
    PnvXive2 *xive = PNV_XIVE2(xrtr);

    if (pnv_xive2_block_id(xive) != blk) {
        xive2_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    *pq = xive_source_esb_get(&xive->ipi_source, idx);
    return 0;
}

static int pnv_xive2_set_pq(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                            uint8_t *pq)
{
    PnvXive2 *xive = PNV_XIVE2(xrtr);

    if (pnv_xive2_block_id(xive) != blk) {
        xive2_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    *pq = xive_source_esb_set(&xive->ipi_source, idx, *pq);
    return 0;
}

static int pnv_xive2_get_end(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                             Xive2End *end)
{
    return pnv_xive2_vst_read(PNV_XIVE2(xrtr), VST_END, blk, idx, end);
}

static int pnv_xive2_write_end(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                               Xive2End *end, uint8_t word_number)
{
    return pnv_xive2_vst_write(PNV_XIVE2(xrtr), VST_END, blk, idx, end,
                               word_number);
}

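/*
 * END cache watch engine: FW selects an entry with
 * VC_ENDC_WATCH0_SPEC, then a read of VC_ENDC_WATCH0_DATA0 fetches
 * the entry into the DATA registers and a write to DATA0 commits
 * DATA0-3 back to the VST.
 */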
static int pnv_xive2_end_update(PnvXive2 *xive)
{
    uint8_t  blk = GETFIELD(VC_ENDC_WATCH_BLOCK_ID,
                            xive->vc_regs[(VC_ENDC_WATCH0_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_ENDC_WATCH_INDEX,
                            xive->vc_regs[(VC_ENDC_WATCH0_SPEC >> 3)]);
    int i;
    uint64_t endc_watch[4];

    for (i = 0; i < ARRAY_SIZE(endc_watch); i++) {
        endc_watch[i] =
            cpu_to_be64(xive->vc_regs[(VC_ENDC_WATCH0_DATA0 >> 3) + i]);
    }

    return pnv_xive2_vst_write(xive, VST_END, blk, idx, endc_watch,
                               XIVE_VST_WORD_ALL);
}

static void pnv_xive2_end_cache_load(PnvXive2 *xive)
{
    uint8_t  blk = GETFIELD(VC_ENDC_WATCH_BLOCK_ID,
                            xive->vc_regs[(VC_ENDC_WATCH0_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_ENDC_WATCH_INDEX,
                            xive->vc_regs[(VC_ENDC_WATCH0_SPEC >> 3)]);
    uint64_t endc_watch[4] = { 0 };
    int i;

    if (pnv_xive2_vst_read(xive, VST_END, blk, idx, endc_watch)) {
        xive2_error(xive, "VST: no END entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(endc_watch); i++) {
        xive->vc_regs[(VC_ENDC_WATCH0_DATA0 >> 3) + i] =
            be64_to_cpu(endc_watch[i]);
    }
}

static int pnv_xive2_get_nvp(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                             Xive2Nvp *nvp)
{
    return pnv_xive2_vst_read(PNV_XIVE2(xrtr), VST_NVP, blk, idx, nvp);
}

static int pnv_xive2_write_nvp(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                               Xive2Nvp *nvp, uint8_t word_number)
{
    return pnv_xive2_vst_write(PNV_XIVE2(xrtr), VST_NVP, blk, idx, nvp,
                               word_number);
}

static int pnv_xive2_nvp_update(PnvXive2 *xive)
{
    uint8_t  blk = GETFIELD(PC_NXC_WATCH_BLOCK_ID,
                            xive->pc_regs[(PC_NXC_WATCH0_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_NXC_WATCH_INDEX,
                            xive->pc_regs[(PC_NXC_WATCH0_SPEC >> 3)]);
    int i;
    uint64_t nxc_watch[4];

    for (i = 0; i < ARRAY_SIZE(nxc_watch); i++) {
        nxc_watch[i] =
            cpu_to_be64(xive->pc_regs[(PC_NXC_WATCH0_DATA0 >> 3) + i]);
    }

    return pnv_xive2_vst_write(xive, VST_NVP, blk, idx, nxc_watch,
                               XIVE_VST_WORD_ALL);
}

static void pnv_xive2_nvp_cache_load(PnvXive2 *xive)
{
    uint8_t  blk = GETFIELD(PC_NXC_WATCH_BLOCK_ID,
                            xive->pc_regs[(PC_NXC_WATCH0_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_NXC_WATCH_INDEX,
                            xive->pc_regs[(PC_NXC_WATCH0_SPEC >> 3)]);
    uint64_t nxc_watch[4] = { 0 };
    int i;

    if (pnv_xive2_vst_read(xive, VST_NVP, blk, idx, nxc_watch)) {
        xive2_error(xive, "VST: no NVP entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(nxc_watch); i++) {
        xive->pc_regs[(PC_NXC_WATCH0_DATA0 >> 3) + i] =
            be64_to_cpu(nxc_watch[i]);
    }
}

static int pnv_xive2_get_eas(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                             Xive2Eas *eas)
{
    PnvXive2 *xive = PNV_XIVE2(xrtr);

    if (pnv_xive2_block_id(xive) != blk) {
        xive2_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    return pnv_xive2_vst_read(xive, VST_EAS, blk, idx, eas);
}

static uint32_t pnv_xive2_get_config(Xive2Router *xrtr)
{
    PnvXive2 *xive = PNV_XIVE2(xrtr);
    uint32_t cfg = 0;

    if (xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS) {
        cfg |= XIVE2_GEN1_TIMA_OS;
    }

    if (xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_EN_VP_SAVE_RESTORE) {
        cfg |= XIVE2_VP_SAVE_RESTORE;
    }

    if (GETFIELD(CQ_XIVE_CFG_HYP_HARD_RANGE,
              xive->cq_regs[CQ_XIVE_CFG >> 3]) == CQ_XIVE_CFG_THREADID_8BITS) {
        cfg |= XIVE2_THREADID_8BITS;
    }

    return cfg;
}

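/*
 * HW threads are enabled through the TCTXT_EN0/1 registers: EN0
 * covers fused cores 0-7 and EN1 the others, with one bit per thread
 * indexed by the low 6 bits of the PIR.
 */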
static bool pnv_xive2_is_cpu_enabled(PnvXive2 *xive, PowerPCCPU *cpu)
{
    int pir = ppc_cpu_pir(cpu);
    uint32_t fc = PNV10_PIR2FUSEDCORE(pir);
    uint64_t reg = fc < 8 ? TCTXT_EN0 : TCTXT_EN1;
    uint32_t bit = pir & 0x3f;

    return xive->tctxt_regs[reg >> 3] & PPC_BIT(bit);
}

static int pnv_xive2_match_nvt(XivePresenter *xptr, uint8_t format,
                               uint8_t nvt_blk, uint32_t nvt_idx,
                               bool cam_ignore, uint8_t priority,
                               uint32_t logic_serv, XiveTCTXMatch *match)
{
    PnvXive2 *xive = PNV_XIVE2(xptr);
    PnvChip *chip = xive->chip;
    int count = 0;
    int i, j;
    bool gen1_tima_os =
        xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS;

    for (i = 0; i < chip->nr_cores; i++) {
        PnvCore *pc = chip->cores[i];
        CPUCore *cc = CPU_CORE(pc);

        for (j = 0; j < cc->nr_threads; j++) {
            PowerPCCPU *cpu = pc->threads[j];
            XiveTCTX *tctx;
            int ring;

            if (!pnv_xive2_is_cpu_enabled(xive, cpu)) {
                continue;
            }

            tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

            if (gen1_tima_os) {
                ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk,
                                                 nvt_idx, cam_ignore,
                                                 logic_serv);
            } else {
                ring = xive2_presenter_tctx_match(xptr, tctx, format, nvt_blk,
                                                  nvt_idx, cam_ignore,
                                                  logic_serv);
            }

            /*
             * Save the context and keep scanning to catch duplicates,
             * which we don't support yet.
             */
            if (ring != -1) {
                if (match->tctx) {
                    qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a "
                                  "thread context NVT %x/%x\n",
                                  nvt_blk, nvt_idx);
                    return false;
                }

                match->ring = ring;
                match->tctx = tctx;
                count++;
            }
        }
    }

    return count;
}

static uint32_t pnv_xive2_presenter_get_config(XivePresenter *xptr)
{
    PnvXive2 *xive = PNV_XIVE2(xptr);
    uint32_t cfg = 0;

    if (xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS) {
        cfg |= XIVE_PRESENTER_GEN1_TIMA_OS;
    }
    return cfg;
}

static uint8_t pnv_xive2_get_block_id(Xive2Router *xrtr)
{
    return pnv_xive2_block_id(PNV_XIVE2(xrtr));
}

/*
 * The TIMA MMIO space is shared among the chips; to identify the
 * chip from which the access is being done, we extract the chip id
 * from the PIR.
 */
static PnvXive2 *pnv_xive2_tm_get_xive(PowerPCCPU *cpu)
{
    int pir = ppc_cpu_pir(cpu);
    XivePresenter *xptr = XIVE_TCTX(pnv_cpu_state(cpu)->intc)->xptr;
    PnvXive2 *xive = PNV_XIVE2(xptr);

    if (!pnv_xive2_is_cpu_enabled(xive, cpu)) {
        xive2_error(xive, "IC: CPU %x is not enabled", pir);
    }
    return xive;
}

/*
 * The internal sources of the interrupt controller have no knowledge
 * of the XIVE2 chip on which they reside. Encode the block id in the
 * source interrupt number before forwarding the source event
 * notification to the Router. This is required on a multichip system.
 */
static void pnv_xive2_notify(XiveNotifier *xn, uint32_t srcno, bool pq_checked)
{
    PnvXive2 *xive = PNV_XIVE2(xn);
    uint8_t blk = pnv_xive2_block_id(xive);

    xive2_router_notify(xn, XIVE_EAS(blk, srcno), pq_checked);
}

/*
 * Set Translation Tables
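 *
 * FW selects a table and an entry through CQ_TAR (optionally with
 * auto-increment) and then pushes each 64-bit entry through CQ_TDR.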
 *
 * TODO add support for multiple sets
 */
static int pnv_xive2_stt_set_data(PnvXive2 *xive, uint64_t val)
{
    uint8_t tsel = GETFIELD(CQ_TAR_SELECT, xive->cq_regs[CQ_TAR >> 3]);
    uint8_t entry = GETFIELD(CQ_TAR_ENTRY_SELECT,
                             xive->cq_regs[CQ_TAR >> 3]);

    switch (tsel) {
    case CQ_TAR_NVPG:
    case CQ_TAR_ESB:
    case CQ_TAR_END:
        xive->tables[tsel][entry] = val;
        break;
    default:
        xive2_error(xive, "IC: unsupported table %d", tsel);
        return -1;
    }

    if (xive->cq_regs[CQ_TAR >> 3] & CQ_TAR_AUTOINC) {
        xive->cq_regs[CQ_TAR >> 3] = SETFIELD(CQ_TAR_ENTRY_SELECT,
                                              xive->cq_regs[CQ_TAR >> 3],
                                              ++entry);
    }

    return 0;
}

/*
 * Virtual Structure Tables (VST) configuration
 */
static void pnv_xive2_vst_set_exclusive(PnvXive2 *xive, uint8_t type,
                                        uint8_t blk, uint64_t vsd)
{
    Xive2EndSource *end_xsrc = &xive->end_source;
    XiveSource *xsrc = &xive->ipi_source;
    const XiveVstInfo *info = &vst_infos[type];
    uint32_t page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
    uint64_t vst_tsize = 1ull << page_shift;
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    /* Basic checks */

    if (VSD_INDIRECT & vsd) {
        if (!pnv_xive2_vst_page_size_allowed(page_shift)) {
            xive2_error(xive, "VST: invalid %s page shift %d", info->name,
                        page_shift);
            return;
        }
    }

    if (!QEMU_IS_ALIGNED(vst_addr, 1ull << page_shift)) {
        xive2_error(xive, "VST: %s table address 0x%"PRIx64
                    " is not aligned with page shift %d",
                    info->name, vst_addr, page_shift);
        return;
    }

    /* Record the table configuration (in SRAM on HW) */
    xive->vsds[type][blk] = vsd;

    /* Now tune the models with the configuration provided by the FW */

    switch (type) {
    case VST_ESB:
        /*
         * Backing store pages for the source PQ bits. The model does
         * not use these PQ bits backed in RAM because the XiveSource
         * model has its own.
         *
         * If the table is direct, we can compute the number of PQ
         * entries provisioned by FW (such as skiboot) and resize the
         * ESB window accordingly.
         */
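        /*
         * Example: a direct 64K table provisions 64K * SBE_PER_BYTE =
         * 256K PQ entries, i.e. a 16G ESB window with 64K ESB pages.
         */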
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&xsrc->esb_mmio, vst_tsize * SBE_PER_BYTE
                                   * (1ull << xsrc->esb_shift));
        }

        memory_region_add_subregion(&xive->esb_mmio, 0, &xsrc->esb_mmio);
        break;

    case VST_EAS:  /* Nothing to be done */
        break;

    case VST_END:
        /*
         * Backing store pages for the END.
         */
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&end_xsrc->esb_mmio, (vst_tsize / info->size)
                                   * (1ull << end_xsrc->esb_shift));
        }
        memory_region_add_subregion(&xive->end_mmio, 0, &end_xsrc->esb_mmio);
        break;

    case VST_NVP:  /* Not modeled */
    case VST_NVG:  /* Not modeled */
    case VST_NVC:  /* Not modeled */
    case VST_IC:   /* Not modeled */
    case VST_SYNC: /* Not modeled */
    case VST_ERQ:  /* Not modeled */
        break;

    default:
        g_assert_not_reached();
    }
}

/*
 * Both the PC and VC sub-engines are configured this way, as each
 * uses the Virtual Structure Tables.
 */
static void pnv_xive2_vst_set_data(PnvXive2 *xive, uint64_t vsd)
{
    uint8_t mode = GETFIELD(VSD_MODE, vsd);
    uint8_t type = GETFIELD(VC_VSD_TABLE_SELECT,
                            xive->vc_regs[VC_VSD_TABLE_ADDR >> 3]);
    uint8_t blk = GETFIELD(VC_VSD_TABLE_ADDRESS,
                           xive->vc_regs[VC_VSD_TABLE_ADDR >> 3]);
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    if (type > VST_ERQ) {
        xive2_error(xive, "VST: invalid table type %d", type);
        return;
    }

    if (blk >= vst_infos[type].max_blocks) {
        xive2_error(xive, "VST: invalid block id %d for"
                    " %s table", blk, vst_infos[type].name);
        return;
    }

    if (!vst_addr) {
        xive2_error(xive, "VST: invalid %s table address",
                    vst_infos[type].name);
        return;
    }

    switch (mode) {
    case VSD_MODE_FORWARD:
        xive->vsds[type][blk] = vsd;
        break;

    case VSD_MODE_EXCLUSIVE:
        pnv_xive2_vst_set_exclusive(xive, type, blk, vsd);
        break;

    default:
        xive2_error(xive, "VST: unsupported table mode %d", mode);
        return;
    }
}

/*
 * MMIO handlers
 */


/*
 * IC BAR layout
 *
 * Page 0: Internal CQ register accesses (reads & writes)
 * Page 1: Internal VC register accesses (reads & writes)
 * Page 2: Internal PC register accesses (reads & writes)
 * Page 3: Internal TCTXT (TIMA) reg accesses (read & writes)
 * Page 4: Notify Port page (writes only, w/data),
 * Page 5: Reserved
 * Page 6: Sync Poll page (writes only, dataless)
 * Page 7: Sync Inject page (writes only, dataless)
 * Page 8: LSI Trigger page (writes only, dataless)
 * Page 9: LSI SB Management page (reads & writes, dataless)
 * Pages 10-255: Reserved
 * Pages 256-383: Direct mapped Thread Context Area (reads & writes)
 *                covering the 128 threads in P10.
 * Pages 384-511: Reserved
 */
typedef struct PnvXive2Region {
    const char *name;
    uint32_t pgoff;
    uint32_t pgsize;
    const MemoryRegionOps *ops;
} PnvXive2Region;

static const MemoryRegionOps pnv_xive2_ic_cq_ops;
static const MemoryRegionOps pnv_xive2_ic_pc_ops;
static const MemoryRegionOps pnv_xive2_ic_vc_ops;
static const MemoryRegionOps pnv_xive2_ic_tctxt_ops;
static const MemoryRegionOps pnv_xive2_ic_notify_ops;
static const MemoryRegionOps pnv_xive2_ic_sync_ops;
static const MemoryRegionOps pnv_xive2_ic_lsi_ops;
static const MemoryRegionOps pnv_xive2_ic_tm_indirect_ops;

/* 512 pages. 4K: 2M range, 64K: 32M range */
static const PnvXive2Region pnv_xive2_ic_regions[] = {
    { "xive-ic-cq",        0,   1,   &pnv_xive2_ic_cq_ops     },
    { "xive-ic-vc",        1,   1,   &pnv_xive2_ic_vc_ops     },
    { "xive-ic-pc",        2,   1,   &pnv_xive2_ic_pc_ops     },
    { "xive-ic-tctxt",     3,   1,   &pnv_xive2_ic_tctxt_ops  },
    { "xive-ic-notify",    4,   1,   &pnv_xive2_ic_notify_ops },
    /* page 5 reserved */
    { "xive-ic-sync",      6,   2,   &pnv_xive2_ic_sync_ops   },
    { "xive-ic-lsi",       8,   2,   &pnv_xive2_ic_lsi_ops    },
    /* pages 10-255 reserved */
    { "xive-ic-tm-indirect", 256, 128, &pnv_xive2_ic_tm_indirect_ops },
    /* pages 384-511 reserved */
};

/*
 * CQ operations
 */

static uint64_t pnv_xive2_ic_cq_read(void *opaque, hwaddr offset,
                                     unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t reg = offset >> 3;
    uint64_t val = 0;

    switch (offset) {
    case CQ_XIVE_CAP: /* Set at reset */
    case CQ_XIVE_CFG:
        val = xive->cq_regs[reg];
        break;
    case CQ_MSGSND: /* TODO check the #cores of the machine */
        val = 0xffffffff00000000ull;
        break;
    case CQ_CFG_PB_GEN:
        val = CQ_CFG_PB_GEN_PB_INIT; /* TODO: fix CQ_CFG_PB_GEN default value */
        break;
    default:
        xive2_error(xive, "CQ: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}

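/*
 * BAR sizes are encoded as a power of two starting at 16M:
 * CQ_BAR_RANGE = n selects a 2^(n + 24) byte range.
 */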
static uint64_t pnv_xive2_bar_size(uint64_t val)
{
    return 1ull << (GETFIELD(CQ_BAR_RANGE, val) + 24);
}

static void pnv_xive2_ic_cq_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    MemoryRegion *sysmem = get_system_memory();
    uint32_t reg = offset >> 3;
    int i;

    switch (offset) {
    case CQ_XIVE_CFG:
    case CQ_RST_CTL: /* TODO: reset all BARs */
        break;

    case CQ_IC_BAR:
        xive->ic_shift = val & CQ_IC_BAR_64K ? 16 : 12;
        if (!(val & CQ_IC_BAR_VALID)) {
            xive->ic_base = 0;
            if (xive->cq_regs[reg] & CQ_IC_BAR_VALID) {
                for (i = 0; i < ARRAY_SIZE(xive->ic_mmios); i++) {
                    memory_region_del_subregion(&xive->ic_mmio,
                                                &xive->ic_mmios[i]);
                }
                memory_region_del_subregion(sysmem, &xive->ic_mmio);
            }
        } else {
            xive->ic_base = val & ~(CQ_IC_BAR_VALID | CQ_IC_BAR_64K);
            if (!(xive->cq_regs[reg] & CQ_IC_BAR_VALID)) {
                for (i = 0; i < ARRAY_SIZE(xive->ic_mmios); i++) {
                    memory_region_add_subregion(&xive->ic_mmio,
                               pnv_xive2_ic_regions[i].pgoff << xive->ic_shift,
                               &xive->ic_mmios[i]);
                }
                memory_region_add_subregion(sysmem, xive->ic_base,
                                            &xive->ic_mmio);
            }
        }
        break;

    case CQ_TM_BAR:
        xive->tm_shift = val & CQ_TM_BAR_64K ? 16 : 12;
        if (!(val & CQ_TM_BAR_VALID)) {
            xive->tm_base = 0;
            if (xive->cq_regs[reg] & CQ_TM_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->tm_mmio);
            }
        } else {
            xive->tm_base = val & ~(CQ_TM_BAR_VALID | CQ_TM_BAR_64K);
            if (!(xive->cq_regs[reg] & CQ_TM_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->tm_base,
                                            &xive->tm_mmio);
            }
        }
        break;

    case CQ_ESB_BAR:
        xive->esb_shift = val & CQ_BAR_64K ? 16 : 12;
        if (!(val & CQ_BAR_VALID)) {
            xive->esb_base = 0;
            if (xive->cq_regs[reg] & CQ_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->esb_mmio);
            }
        } else {
            xive->esb_base = val & CQ_BAR_ADDR;
            if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
                memory_region_set_size(&xive->esb_mmio,
                                       pnv_xive2_bar_size(val));
                memory_region_add_subregion(sysmem, xive->esb_base,
                                            &xive->esb_mmio);
            }
        }
        break;

    case CQ_END_BAR:
        xive->end_shift = val & CQ_BAR_64K ? 16 : 12;
        if (!(val & CQ_BAR_VALID)) {
            xive->end_base = 0;
            if (xive->cq_regs[reg] & CQ_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->end_mmio);
            }
        } else {
            xive->end_base = val & CQ_BAR_ADDR;
            if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
                memory_region_set_size(&xive->end_mmio,
                                       pnv_xive2_bar_size(val));
                memory_region_add_subregion(sysmem, xive->end_base,
                                            &xive->end_mmio);
            }
        }
        break;

    case CQ_NVC_BAR:
        xive->nvc_shift = val & CQ_BAR_64K ? 16 : 12;
        if (!(val & CQ_BAR_VALID)) {
            xive->nvc_base = 0;
            if (xive->cq_regs[reg] & CQ_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->nvc_mmio);
            }
        } else {
            xive->nvc_base = val & CQ_BAR_ADDR;
            if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
                memory_region_set_size(&xive->nvc_mmio,
                                       pnv_xive2_bar_size(val));
                memory_region_add_subregion(sysmem, xive->nvc_base,
                                            &xive->nvc_mmio);
            }
        }
        break;

    case CQ_NVPG_BAR:
        xive->nvpg_shift = val & CQ_BAR_64K ? 16 : 12;
        if (!(val & CQ_BAR_VALID)) {
            xive->nvpg_base = 0;
            if (xive->cq_regs[reg] & CQ_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->nvpg_mmio);
            }
        } else {
            xive->nvpg_base = val & CQ_BAR_ADDR;
            if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
                memory_region_set_size(&xive->nvpg_mmio,
                                       pnv_xive2_bar_size(val));
                memory_region_add_subregion(sysmem, xive->nvpg_base,
                                            &xive->nvpg_mmio);
            }
        }
        break;

    case CQ_TAR: /* Set Translation Table Address */
        break;
    case CQ_TDR: /* Set Translation Table Data */
        pnv_xive2_stt_set_data(xive, val);
        break;
    case CQ_FIRMASK_OR: /* FIR error reporting */
        break;
    default:
        xive2_error(xive, "CQ: invalid write @%"HWADDR_PRIx, offset);
        return;
    }

    xive->cq_regs[reg] = val;
}

static const MemoryRegionOps pnv_xive2_ic_cq_ops = {
    .read = pnv_xive2_ic_cq_read,
    .write = pnv_xive2_ic_cq_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static uint64_t pnv_xive2_ic_vc_read(void *opaque, hwaddr offset,
                                     unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint64_t val = 0;
    uint32_t reg = offset >> 3;

    switch (offset) {
    /*
     * VSD table settings.
     */
    case VC_VSD_TABLE_ADDR:
    case VC_VSD_TABLE_DATA:
        val = xive->vc_regs[reg];
        break;

    /*
     * ESB cache updates (not modeled)
     */
    case VC_ESBC_FLUSH_CTRL:
        xive->vc_regs[reg] &= ~VC_ESBC_FLUSH_CTRL_POLL_VALID;
        val = xive->vc_regs[reg];
        break;

    case VC_ESBC_CFG:
        val = xive->vc_regs[reg];
        break;

    /*
     * EAS cache updates (not modeled)
     */
    case VC_EASC_FLUSH_CTRL:
        xive->vc_regs[reg] &= ~VC_EASC_FLUSH_CTRL_POLL_VALID;
        val = xive->vc_regs[reg];
        break;

    /*
     * END cache updates
     */
    case VC_ENDC_WATCH0_SPEC:
        xive->vc_regs[reg] &= ~(VC_ENDC_WATCH_FULL | VC_ENDC_WATCH_CONFLICT);
        val = xive->vc_regs[reg];
        break;

    case VC_ENDC_WATCH0_DATA0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        pnv_xive2_end_cache_load(xive);
        val = xive->vc_regs[reg];
        break;

    case VC_ENDC_WATCH0_DATA1 ... VC_ENDC_WATCH0_DATA3:
        val = xive->vc_regs[reg];
        break;

    case VC_ENDC_FLUSH_CTRL:
        xive->vc_regs[reg] &= ~VC_ENDC_FLUSH_CTRL_POLL_VALID;
        val = xive->vc_regs[reg];
        break;

    /*
     * Indirect invalidation
     */
    case VC_AT_MACRO_KILL_MASK:
        val = xive->vc_regs[reg];
        break;

    case VC_AT_MACRO_KILL:
        xive->vc_regs[reg] &= ~VC_AT_MACRO_KILL_VALID;
        val = xive->vc_regs[reg];
        break;

    /*
     * Interrupt fifo overflow in memory backing store (Not modeled)
     */
    case VC_QUEUES_CFG_REM0 ... VC_QUEUES_CFG_REM6:
        val = xive->vc_regs[reg];
        break;

    /*
     * Synchronisation
     */
    case VC_ENDC_SYNC_DONE:
        val = VC_ENDC_SYNC_POLL_DONE;
        break;
    default:
        xive2_error(xive, "VC: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}

static void pnv_xive2_ic_vc_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t reg = offset >> 3;

    switch (offset) {
    /*
     * VSD table settings.
     */
    case VC_VSD_TABLE_ADDR:
        break;
    case VC_VSD_TABLE_DATA:
        pnv_xive2_vst_set_data(xive, val);
        break;

    /*
     * ESB cache updates (not modeled)
     */
    /* case VC_ESBC_FLUSH_CTRL: */
    case VC_ESBC_FLUSH_POLL:
        xive->vc_regs[VC_ESBC_FLUSH_CTRL >> 3] |= VC_ESBC_FLUSH_CTRL_POLL_VALID;
        /* ESB update */
        break;

    case VC_ESBC_CFG:
        break;

    /*
     * EAS cache updates (not modeled)
     */
    /* case VC_EASC_FLUSH_CTRL: */
    case VC_EASC_FLUSH_POLL:
        xive->vc_regs[VC_EASC_FLUSH_CTRL >> 3] |= VC_EASC_FLUSH_CTRL_POLL_VALID;
        /* EAS update */
        break;

    /*
     * END cache updates
     */
    case VC_ENDC_WATCH0_SPEC:
        val &= ~VC_ENDC_WATCH_CONFLICT; /* HW will set this bit */
        break;

    case VC_ENDC_WATCH0_DATA1 ... VC_ENDC_WATCH0_DATA3:
        break;
    case VC_ENDC_WATCH0_DATA0:
        /* writing to DATA0 triggers the cache write */
        xive->vc_regs[reg] = val;
        pnv_xive2_end_update(xive);
        break;

    /* case VC_ENDC_FLUSH_CTRL: */
    case VC_ENDC_FLUSH_POLL:
        xive->vc_regs[VC_ENDC_FLUSH_CTRL >> 3] |= VC_ENDC_FLUSH_CTRL_POLL_VALID;
        break;

    /*
     * Indirect invalidation
     */
    case VC_AT_MACRO_KILL:
    case VC_AT_MACRO_KILL_MASK:
        break;

    /*
     * Interrupt fifo overflow in memory backing store (Not modeled)
     */
    case VC_QUEUES_CFG_REM0 ... VC_QUEUES_CFG_REM6:
        break;

    /*
     * Synchronisation
     */
    case VC_ENDC_SYNC_DONE:
        break;

    default:
        xive2_error(xive, "VC: invalid write @%"HWADDR_PRIx, offset);
        return;
    }

    xive->vc_regs[reg] = val;
}

static const MemoryRegionOps pnv_xive2_ic_vc_ops = {
    .read = pnv_xive2_ic_vc_read,
    .write = pnv_xive2_ic_vc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static uint64_t pnv_xive2_ic_pc_read(void *opaque, hwaddr offset,
                                     unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint64_t val = -1;
    uint32_t reg = offset >> 3;

    switch (offset) {
    /*
     * VSD table settings.
     */
    case PC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_DATA:
        val = xive->pc_regs[reg];
        break;

    /*
     * cache updates
     */
    case PC_NXC_WATCH0_SPEC:
        xive->pc_regs[reg] &= ~(PC_NXC_WATCH_FULL | PC_NXC_WATCH_CONFLICT);
        val = xive->pc_regs[reg];
        break;

    case PC_NXC_WATCH0_DATA0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        pnv_xive2_nvp_cache_load(xive);
        val = xive->pc_regs[reg];
        break;

    case PC_NXC_WATCH0_DATA1 ... PC_NXC_WATCH0_DATA3:
        val = xive->pc_regs[reg];
        break;

    case PC_NXC_FLUSH_CTRL:
        xive->pc_regs[reg] &= ~PC_NXC_FLUSH_CTRL_POLL_VALID;
        val = xive->pc_regs[reg];
        break;

    /*
     * Indirect invalidation
     */
    case PC_AT_KILL:
        xive->pc_regs[reg] &= ~PC_AT_KILL_VALID;
        val = xive->pc_regs[reg];
        break;

    default:
        xive2_error(xive, "PC: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}

static void pnv_xive2_ic_pc_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t reg = offset >> 3;

    switch (offset) {

    /*
     * VSD table settings. Only taken into account in the VC
     * sub-engine because the Xive2Router model combines both VC and PC
     * sub-engines
     */
    case PC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_DATA:
        break;

    /*
     * cache updates
     */
    case PC_NXC_WATCH0_SPEC:
        val &= ~PC_NXC_WATCH_CONFLICT; /* HW will set this bit */
        break;

    case PC_NXC_WATCH0_DATA1 ... PC_NXC_WATCH0_DATA3:
        break;
    case PC_NXC_WATCH0_DATA0:
        /* writing to DATA0 triggers the cache write */
        xive->pc_regs[reg] = val;
        pnv_xive2_nvp_update(xive);
        break;

    /* case PC_NXC_FLUSH_CTRL: */
    case PC_NXC_FLUSH_POLL:
        xive->pc_regs[PC_NXC_FLUSH_CTRL >> 3] |= PC_NXC_FLUSH_CTRL_POLL_VALID;
        break;

    /*
     * Indirect invalidation
     */
    case PC_AT_KILL:
    case PC_AT_KILL_MASK:
        break;

    default:
        xive2_error(xive, "PC: invalid write @%"HWADDR_PRIx, offset);
        return;
    }

    xive->pc_regs[reg] = val;
}

static const MemoryRegionOps pnv_xive2_ic_pc_ops = {
    .read = pnv_xive2_ic_pc_read,
    .write = pnv_xive2_ic_pc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static uint64_t pnv_xive2_ic_tctxt_read(void *opaque, hwaddr offset,
                                        unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint64_t val = -1;
    uint32_t reg = offset >> 3;

    switch (offset) {
    /*
     * XIVE2 hardware thread enablement
     */
    case TCTXT_EN0:
    case TCTXT_EN1:
        val = xive->tctxt_regs[reg];
        break;

    case TCTXT_EN0_SET:
    case TCTXT_EN0_RESET:
        val = xive->tctxt_regs[TCTXT_EN0 >> 3];
        break;
    case TCTXT_EN1_SET:
    case TCTXT_EN1_RESET:
        val = xive->tctxt_regs[TCTXT_EN1 >> 3];
        break;
    case TCTXT_CFG:
        val = xive->tctxt_regs[reg];
        break;
    default:
        xive2_error(xive, "TCTXT: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}

static void pnv_xive2_ic_tctxt_write(void *opaque, hwaddr offset,
                                     uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t reg = offset >> 3;

    switch (offset) {
    /*
     * XIVE2 hardware thread enablement
     */
    case TCTXT_EN0: /* Physical Thread Enable */
    case TCTXT_EN1: /* Physical Thread Enable (fused core) */
        xive->tctxt_regs[reg] = val;
        break;

    case TCTXT_EN0_SET:
        xive->tctxt_regs[TCTXT_EN0 >> 3] |= val;
        break;
    case TCTXT_EN1_SET:
        xive->tctxt_regs[TCTXT_EN1 >> 3] |= val;
        break;
    case TCTXT_EN0_RESET:
        xive->tctxt_regs[TCTXT_EN0 >> 3] &= ~val;
        break;
    case TCTXT_EN1_RESET:
        xive->tctxt_regs[TCTXT_EN1 >> 3] &= ~val;
        break;
    case TCTXT_CFG:
        xive->tctxt_regs[reg] = val;
        break;
    default:
        xive2_error(xive, "TCTXT: invalid write @%"HWADDR_PRIx, offset);
        return;
    }
}

static const MemoryRegionOps pnv_xive2_ic_tctxt_ops = {
    .read = pnv_xive2_ic_tctxt_read,
    .write = pnv_xive2_ic_tctxt_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * Redirect XSCOM to MMIO handlers
 */
static uint64_t pnv_xive2_xscom_read(void *opaque, hwaddr offset,
                                     unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint64_t val = -1;
    uint32_t xscom_reg = offset >> 3;
    uint32_t mmio_offset = (xscom_reg & 0xFF) << 3;

    switch (xscom_reg) {
    case 0x000 ... 0x0FF:
        val = pnv_xive2_ic_cq_read(opaque, mmio_offset, size);
        break;
    case 0x100 ... 0x1FF:
        val = pnv_xive2_ic_vc_read(opaque, mmio_offset, size);
        break;
    case 0x200 ... 0x2FF:
        val = pnv_xive2_ic_pc_read(opaque, mmio_offset, size);
        break;
    case 0x300 ... 0x3FF:
        val = pnv_xive2_ic_tctxt_read(opaque, mmio_offset, size);
        break;
    default:
        xive2_error(xive, "XSCOM: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}

static void pnv_xive2_xscom_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t xscom_reg = offset >> 3;
    uint32_t mmio_offset = (xscom_reg & 0xFF) << 3;

    switch (xscom_reg) {
    case 0x000 ... 0x0FF:
        pnv_xive2_ic_cq_write(opaque, mmio_offset, val, size);
        break;
    case 0x100 ... 0x1FF:
        pnv_xive2_ic_vc_write(opaque, mmio_offset, val, size);
        break;
    case 0x200 ... 0x2FF:
        pnv_xive2_ic_pc_write(opaque, mmio_offset, val, size);
        break;
    case 0x300 ... 0x3FF:
        pnv_xive2_ic_tctxt_write(opaque, mmio_offset, val, size);
        break;
    default:
        xive2_error(xive, "XSCOM: invalid write @%"HWADDR_PRIx, offset);
    }
}

static const MemoryRegionOps pnv_xive2_xscom_ops = {
    .read = pnv_xive2_xscom_read,
    .write = pnv_xive2_xscom_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * Notify port page. The layout is compatible between 4K and 64K pages:
 *
 * Page 1           Notify page (writes only)
 *  0x000 - 0x7FF   IPI interrupt (NPU)
 *  0x800 - 0xFFF   HW interrupt triggers (PSI, PHB)
 */

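/*
 * The trigger data carries an END trigger flag (XIVE_TRIGGER_END, not
 * supported by the model), a flag telling whether the source already
 * checked the PQ state (XIVE_TRIGGER_PQ), and the EAS block/index of
 * the source event.
 */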
static void pnv_xive2_ic_hw_trigger(PnvXive2 *xive, hwaddr addr,
                                    uint64_t val)
{
    uint8_t blk;
    uint32_t idx;

    if (val & XIVE_TRIGGER_END) {
        xive2_error(xive, "IC: END trigger at @0x%"HWADDR_PRIx" data 0x%"PRIx64,
                    addr, val);
        return;
    }

    /*
     * Forward the source event notification directly to the Router.
     * The source interrupt number should already be correctly encoded
     * with the chip block id by the sending device (PHB, PSI).
     */
    blk = XIVE_EAS_BLOCK(val);
    idx = XIVE_EAS_INDEX(val);

    xive2_router_notify(XIVE_NOTIFIER(xive), XIVE_EAS(blk, idx),
                        !!(val & XIVE_TRIGGER_PQ));
}

static void pnv_xive2_ic_notify_write(void *opaque, hwaddr offset,
                                      uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    /* VC: IPI triggers */
    switch (offset) {
    case 0x000 ... 0x7FF:
        /* TODO: check IPI notify sub-page routing */
        pnv_xive2_ic_hw_trigger(opaque, offset, val);
        break;

    /* VC: HW triggers */
    case 0x800 ... 0xFFF:
        pnv_xive2_ic_hw_trigger(opaque, offset, val);
        break;

    default:
        xive2_error(xive, "NOTIFY: invalid write @%"HWADDR_PRIx, offset);
    }
}

static uint64_t pnv_xive2_ic_notify_read(void *opaque, hwaddr offset,
                                         unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    /* loads are invalid */
    xive2_error(xive, "NOTIFY: invalid read @%"HWADDR_PRIx, offset);
    return -1;
}

static const MemoryRegionOps pnv_xive2_ic_notify_ops = {
    .read = pnv_xive2_ic_notify_read,
    .write = pnv_xive2_ic_notify_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static uint64_t pnv_xive2_ic_lsi_read(void *opaque, hwaddr offset,
                                      unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "LSI: invalid read @%"HWADDR_PRIx, offset);
    return -1;
}

static void pnv_xive2_ic_lsi_write(void *opaque, hwaddr offset,
                                   uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "LSI: invalid write @%"HWADDR_PRIx, offset);
}

static const MemoryRegionOps pnv_xive2_ic_lsi_ops = {
    .read = pnv_xive2_ic_lsi_read,
    .write = pnv_xive2_ic_lsi_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * Sync MMIO page (write only)
 */
#define PNV_XIVE2_SYNC_IPI      0x000
#define PNV_XIVE2_SYNC_HW       0x080
#define PNV_XIVE2_SYNC_NxC      0x100
#define PNV_XIVE2_SYNC_INT      0x180
#define PNV_XIVE2_SYNC_OS_ESC   0x200
#define PNV_XIVE2_SYNC_POOL_ESC 0x280
#define PNV_XIVE2_SYNC_HARD_ESC 0x300
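
/*
 * The sync offsets mirror the VC queue types (see the VST_ERQ comment
 * above); the model treats all sync operations as no-ops.
 */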

static uint64_t pnv_xive2_ic_sync_read(void *opaque, hwaddr offset,
                                       unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    /* loads are invalid */
    xive2_error(xive, "SYNC: invalid read @%"HWADDR_PRIx, offset);
    return -1;
}

static void pnv_xive2_ic_sync_write(void *opaque, hwaddr offset,
                                    uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    switch (offset) {
    case PNV_XIVE2_SYNC_IPI:
    case PNV_XIVE2_SYNC_HW:
    case PNV_XIVE2_SYNC_NxC:
    case PNV_XIVE2_SYNC_INT:
    case PNV_XIVE2_SYNC_OS_ESC:
    case PNV_XIVE2_SYNC_POOL_ESC:
    case PNV_XIVE2_SYNC_HARD_ESC:
        break;
    default:
        xive2_error(xive, "SYNC: invalid write @%"HWADDR_PRIx, offset);
    }
}

static const MemoryRegionOps pnv_xive2_ic_sync_ops = {
    .read = pnv_xive2_ic_sync_read,
    .write = pnv_xive2_ic_sync_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * When the TM direct pages of the IC controller are accessed, the
 * target HW thread is deduced from the page offset.
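 *
 * For instance, with 64K IC pages, an access at page 3 of the
 * indirect TIMA region targets the thread with PIR
 * (chip_id << 8) | 3.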
 */
static uint32_t pnv_xive2_ic_tm_get_pir(PnvXive2 *xive, hwaddr offset)
{
    /* On P10, the node ID shift in the PIR register is 8 bits */
    return xive->chip->chip_id << 8 | offset >> xive->ic_shift;
}

static uint32_t pnv_xive2_ic_tm_get_hw_page_offset(PnvXive2 *xive,
                                                   hwaddr offset)
{
    /*
     * Indirect TIMA accesses are similar to direct accesses for
     * privilege ring 0. So remove any traces of the hw thread ID from
     * the offset in the IC BAR as it could be interpreted as the ring
     * privilege when calling the underlying direct access functions.
     */
    return offset & ((1ull << xive->ic_shift) - 1);
}

static XiveTCTX *pnv_xive2_get_indirect_tctx(PnvXive2 *xive, uint32_t pir)
{
    PnvChip *chip = xive->chip;
    PowerPCCPU *cpu = NULL;

    cpu = pnv_chip_find_cpu(chip, pir);
    if (!cpu) {
        xive2_error(xive, "IC: invalid PIR %x for indirect access", pir);
        return NULL;
    }

    if (!pnv_xive2_is_cpu_enabled(xive, cpu)) {
        xive2_error(xive, "IC: CPU %x is not enabled", pir);
    }

    return XIVE_TCTX(pnv_cpu_state(cpu)->intc);
}

static uint64_t pnv_xive2_ic_tm_indirect_read(void *opaque, hwaddr offset,
                                              unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    XivePresenter *xptr = XIVE_PRESENTER(xive);
    hwaddr hw_page_offset;
    uint32_t pir;
    XiveTCTX *tctx;
    uint64_t val = -1;

    pir = pnv_xive2_ic_tm_get_pir(xive, offset);
    hw_page_offset = pnv_xive2_ic_tm_get_hw_page_offset(xive, offset);
    tctx = pnv_xive2_get_indirect_tctx(xive, pir);
    if (tctx) {
        val = xive_tctx_tm_read(xptr, tctx, hw_page_offset, size);
    }

    return val;
}

static void pnv_xive2_ic_tm_indirect_write(void *opaque, hwaddr offset,
                                           uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    XivePresenter *xptr = XIVE_PRESENTER(xive);
    hwaddr hw_page_offset;
    uint32_t pir;
    XiveTCTX *tctx;

    pir = pnv_xive2_ic_tm_get_pir(xive, offset);
    hw_page_offset = pnv_xive2_ic_tm_get_hw_page_offset(xive, offset);
    tctx = pnv_xive2_get_indirect_tctx(xive, pir);
    if (tctx) {
        xive_tctx_tm_write(xptr, tctx, hw_page_offset, val, size);
    }
}

static const MemoryRegionOps pnv_xive2_ic_tm_indirect_ops = {
    .read = pnv_xive2_ic_tm_indirect_read,
    .write = pnv_xive2_ic_tm_indirect_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

/*
 * TIMA ops
 */
static void pnv_xive2_tm_write(void *opaque, hwaddr offset,
                               uint64_t value, unsigned size)
{
    PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
    PnvXive2 *xive = pnv_xive2_tm_get_xive(cpu);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
    XivePresenter *xptr = XIVE_PRESENTER(xive);

    xive_tctx_tm_write(xptr, tctx, offset, value, size);
}

static uint64_t pnv_xive2_tm_read(void *opaque, hwaddr offset, unsigned size)
{
    PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
    PnvXive2 *xive = pnv_xive2_tm_get_xive(cpu);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
    XivePresenter *xptr = XIVE_PRESENTER(xive);

    return xive_tctx_tm_read(xptr, tctx, offset, size);
}

static const MemoryRegionOps pnv_xive2_tm_ops = {
    .read = pnv_xive2_tm_read,
    .write = pnv_xive2_tm_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};
1731 
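/*
 * The NVC MMIO region is not modeled: all accesses are rejected and
 * logged as guest errors.
 */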
1732 static uint64_t pnv_xive2_nvc_read(void *opaque, hwaddr offset,
1733                                    unsigned size)
1734 {
1735     PnvXive2 *xive = PNV_XIVE2(opaque);
1736 
1737     xive2_error(xive, "NVC: invalid read @%"HWADDR_PRIx, offset);
1738     return -1;
1739 }
1740 
1741 static void pnv_xive2_nvc_write(void *opaque, hwaddr offset,
1742                                 uint64_t val, unsigned size)
1743 {
1744     PnvXive2 *xive = PNV_XIVE2(opaque);
1745 
1746     xive2_error(xive, "NVC: invalid write @%"HWADDR_PRIx, offset);
1747 }
1748 
1749 static const MemoryRegionOps pnv_xive2_nvc_ops = {
1750     .read = pnv_xive2_nvc_read,
1751     .write = pnv_xive2_nvc_write,
1752     .endianness = DEVICE_BIG_ENDIAN,
1753     .valid = {
1754         .min_access_size = 8,
1755         .max_access_size = 8,
1756     },
1757     .impl = {
1758         .min_access_size = 8,
1759         .max_access_size = 8,
1760     },
1761 };
1762 
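/*
 * Likewise, the NVPG MMIO region is not modeled.
 */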
1763 static uint64_t pnv_xive2_nvpg_read(void *opaque, hwaddr offset,
1764                                     unsigned size)
1765 {
1766     PnvXive2 *xive = PNV_XIVE2(opaque);
1767 
1768     xive2_error(xive, "NVPG: invalid read @%"HWADDR_PRIx, offset);
1769     return -1;
1770 }
1771 
1772 static void pnv_xive2_nvpg_write(void *opaque, hwaddr offset,
1773                                  uint64_t val, unsigned size)
1774 {
1775     PnvXive2 *xive = PNV_XIVE2(opaque);
1776 
1777     xive2_error(xive, "NVPG: invalid write @%"HWADDR_PRIx, offset);
1778 }
1779 
1780 static const MemoryRegionOps pnv_xive2_nvpg_ops = {
1781     .read = pnv_xive2_nvpg_read,
1782     .write = pnv_xive2_nvpg_write,
1783     .endianness = DEVICE_BIG_ENDIAN,
1784     .valid = {
1785         .min_access_size = 8,
1786         .max_access_size = 8,
1787     },
1788     .impl = {
1789         .min_access_size = 8,
1790         .max_access_size = 8,
1791     },
1792 };
1793 
1794 /*
1795  * POWER10 default capabilities: 0x2000120076f000FC
1796  */
1797 #define PNV_XIVE2_CAPABILITIES  0x2000120076f000FC
1798 
1799 /*
1800  * POWER10 default configuration: 0x0030000033000000
1801  *
1802  * The 8-bit thread id field was dropped on P10
1803  */
1804 #define PNV_XIVE2_CONFIGURATION 0x0030000033000000
1805 
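/*
 * Reset handler, registered with qemu_register_reset() at realize
 * time: restores the default capabilities/configuration registers
 * and the 64k page shifts, and unmaps the source MMIO subregions.
 */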
1806 static void pnv_xive2_reset(void *dev)
1807 {
1808     PnvXive2 *xive = PNV_XIVE2(dev);
1809     XiveSource *xsrc = &xive->ipi_source;
1810     Xive2EndSource *end_xsrc = &xive->end_source;
1811 
1812     xive->cq_regs[CQ_XIVE_CAP >> 3] = xive->capabilities;
1813     xive->cq_regs[CQ_XIVE_CFG >> 3] = xive->config;
1814 
1815     /* HW hardwires the chip's topology ID (#Topology) into the block field */
1816     xive->cq_regs[CQ_XIVE_CFG >> 3] |=
1817         SETFIELD(CQ_XIVE_CFG_HYP_HARD_BLOCK_ID, 0ull, xive->chip->chip_id);
1818 
1819     /* Set default page size to 64k */
1820     xive->ic_shift = xive->esb_shift = xive->end_shift = 16;
1821     xive->nvc_shift = xive->nvpg_shift = xive->tm_shift = 16;
1822 
1823     /* Clear source MMIOs */
1824     if (memory_region_is_mapped(&xsrc->esb_mmio)) {
1825         memory_region_del_subregion(&xive->esb_mmio, &xsrc->esb_mmio);
1826     }
1827 
1828     if (memory_region_is_mapped(&end_xsrc->esb_mmio)) {
1829         memory_region_del_subregion(&xive->end_mmio, &end_xsrc->esb_mmio);
1830     }
1831 }
1832 
1833 /*
1834  *  Maximum number of IRQs and ENDs supported by the HW. Software will
1835  *  tune these down when it configures the controller.
1836  */
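/*
 * Each source (and END) is backed by a pair of 64k ESB pages
 * (1 << XIVE_ESB_64K_2PAGE = 128k), hence the divisions below.
 */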
1837 #define PNV_XIVE2_NR_IRQS (PNV10_XIVE2_ESB_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
1838 #define PNV_XIVE2_NR_ENDS (PNV10_XIVE2_END_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
1839 
1840 static void pnv_xive2_realize(DeviceState *dev, Error **errp)
1841 {
1842     PnvXive2 *xive = PNV_XIVE2(dev);
1843     PnvXive2Class *pxc = PNV_XIVE2_GET_CLASS(dev);
1844     XiveSource *xsrc = &xive->ipi_source;
1845     Xive2EndSource *end_xsrc = &xive->end_source;
1846     Error *local_err = NULL;
1847     int i;
1848 
1849     pxc->parent_realize(dev, &local_err);
1850     if (local_err) {
1851         error_propagate(errp, local_err);
1852         return;
1853     }
1854 
1855     assert(xive->chip);
1856 
1857     /*
1858      * The XiveSource and Xive2EndSource objects are realized with the
1859      * maximum allowed HW configuration. The ESB MMIO regions are
1860      * resized dynamically when the FW configures the controller, so
1861      * that accesses to unprovisioned resources can be rejected.
1862      */
1863     object_property_set_int(OBJECT(xsrc), "flags", XIVE_SRC_STORE_EOI,
1864                             &error_fatal);
1865     object_property_set_int(OBJECT(xsrc), "nr-irqs", PNV_XIVE2_NR_IRQS,
1866                             &error_fatal);
1867     object_property_set_link(OBJECT(xsrc), "xive", OBJECT(xive),
1868                              &error_fatal);
1869     qdev_realize(DEVICE(xsrc), NULL, &local_err);
1870     if (local_err) {
1871         error_propagate(errp, local_err);
1872         return;
1873     }
1874 
1875     object_property_set_int(OBJECT(end_xsrc), "nr-ends", PNV_XIVE2_NR_ENDS,
1876                             &error_fatal);
1877     object_property_set_link(OBJECT(end_xsrc), "xive", OBJECT(xive),
1878                              &error_abort);
1879     qdev_realize(DEVICE(end_xsrc), NULL, &local_err);
1880     if (local_err) {
1881         error_propagate(errp, local_err);
1882         return;
1883     }
1884 
1885     /* XSCOM region, used for initial configuration of the BARs */
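    /* XSCOM sizes count 8-byte registers, hence the << 3 conversion to bytes */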
1886     memory_region_init_io(&xive->xscom_regs, OBJECT(dev),
1887                           &pnv_xive2_xscom_ops, xive, "xscom-xive",
1888                           PNV10_XSCOM_XIVE2_SIZE << 3);
1889 
1890     /* Interrupt controller MMIO regions */
1891     xive->ic_shift = 16;
1892     memory_region_init(&xive->ic_mmio, OBJECT(dev), "xive-ic",
1893                        PNV10_XIVE2_IC_SIZE);
1894 
1895     for (i = 0; i < ARRAY_SIZE(xive->ic_mmios); i++) {
1896         memory_region_init_io(&xive->ic_mmios[i], OBJECT(dev),
1897                               pnv_xive2_ic_regions[i].ops, xive,
1898                               pnv_xive2_ic_regions[i].name,
1899                               pnv_xive2_ic_regions[i].pgsize << xive->ic_shift);
1900     }
1901 
1902     /*
1903      * VC MMIO regions.
1904      */
1905     xive->esb_shift = 16;
1906     xive->end_shift = 16;
1907     memory_region_init(&xive->esb_mmio, OBJECT(xive), "xive-esb",
1908                        PNV10_XIVE2_ESB_SIZE);
1909     memory_region_init(&xive->end_mmio, OBJECT(xive), "xive-end",
1910                        PNV10_XIVE2_END_SIZE);
1911 
1912     /* Presenter Controller MMIO region (not modeled) */
1913     xive->nvc_shift = 16;
1914     xive->nvpg_shift = 16;
1915     memory_region_init_io(&xive->nvc_mmio, OBJECT(dev),
1916                           &pnv_xive2_nvc_ops, xive,
1917                           "xive-nvc", PNV10_XIVE2_NVC_SIZE);
1918 
1919     memory_region_init_io(&xive->nvpg_mmio, OBJECT(dev),
1920                           &pnv_xive2_nvpg_ops, xive,
1921                           "xive-nvpg", PNV10_XIVE2_NVPG_SIZE);
1922 
1923     /* Thread Interrupt Management Area (Direct) */
1924     xive->tm_shift = 16;
1925     memory_region_init_io(&xive->tm_mmio, OBJECT(dev), &pnv_xive2_tm_ops,
1926                           xive, "xive-tima", PNV10_XIVE2_TM_SIZE);
1927 
1928     qemu_register_reset(pnv_xive2_reset, dev);
1929 }
1930 
1931 static Property pnv_xive2_properties[] = {
1932     DEFINE_PROP_UINT64("ic-bar", PnvXive2, ic_base, 0),
1933     DEFINE_PROP_UINT64("esb-bar", PnvXive2, esb_base, 0),
1934     DEFINE_PROP_UINT64("end-bar", PnvXive2, end_base, 0),
1935     DEFINE_PROP_UINT64("nvc-bar", PnvXive2, nvc_base, 0),
1936     DEFINE_PROP_UINT64("nvpg-bar", PnvXive2, nvpg_base, 0),
1937     DEFINE_PROP_UINT64("tm-bar", PnvXive2, tm_base, 0),
1938     DEFINE_PROP_UINT64("capabilities", PnvXive2, capabilities,
1939                        PNV_XIVE2_CAPABILITIES),
1940     DEFINE_PROP_UINT64("config", PnvXive2, config,
1941                        PNV_XIVE2_CONFIGURATION),
1942     DEFINE_PROP_LINK("chip", PnvXive2, chip, TYPE_PNV_CHIP, PnvChip *),
1943     DEFINE_PROP_END_OF_LIST(),
1944 };
1945 
1946 static void pnv_xive2_instance_init(Object *obj)
1947 {
1948     PnvXive2 *xive = PNV_XIVE2(obj);
1949 
1950     object_initialize_child(obj, "ipi_source", &xive->ipi_source,
1951                             TYPE_XIVE_SOURCE);
1952     object_initialize_child(obj, "end_source", &xive->end_source,
1953                             TYPE_XIVE2_END_SOURCE);
1954 }
1955 
1956 static int pnv_xive2_dt_xscom(PnvXScomInterface *dev, void *fdt,
1957                               int xscom_offset)
1958 {
1959     const char compat_p10[] = "ibm,power10-xive-x";
1960     char *name;
1961     int offset;
1962     uint32_t reg[] = {
1963         cpu_to_be32(PNV10_XSCOM_XIVE2_BASE),
1964         cpu_to_be32(PNV10_XSCOM_XIVE2_SIZE)
1965     };
1966 
1967     name = g_strdup_printf("xive@%x", PNV10_XSCOM_XIVE2_BASE);
1968     offset = fdt_add_subnode(fdt, xscom_offset, name);
1969     _FDT(offset);
1970     g_free(name);
1971 
1972     _FDT(fdt_setprop(fdt, offset, "reg", reg, sizeof(reg)));
1973     _FDT(fdt_setprop(fdt, offset, "compatible", compat_p10,
1974                      sizeof(compat_p10)));
1975     return 0;
1976 }
1977 
1978 static void pnv_xive2_class_init(ObjectClass *klass, void *data)
1979 {
1980     DeviceClass *dc = DEVICE_CLASS(klass);
1981     PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass);
1982     Xive2RouterClass *xrc = XIVE2_ROUTER_CLASS(klass);
1983     XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);
1984     XivePresenterClass *xpc = XIVE_PRESENTER_CLASS(klass);
1985     PnvXive2Class *pxc = PNV_XIVE2_CLASS(klass);
1986 
1987     xdc->dt_xscom  = pnv_xive2_dt_xscom;
1988 
1989     dc->desc       = "PowerNV XIVE2 Interrupt Controller (POWER10)";
1990     device_class_set_parent_realize(dc, pnv_xive2_realize,
1991                                     &pxc->parent_realize);
1992     device_class_set_props(dc, pnv_xive2_properties);
1993 
1994     xrc->get_eas   = pnv_xive2_get_eas;
1995     xrc->get_pq    = pnv_xive2_get_pq;
1996     xrc->set_pq    = pnv_xive2_set_pq;
1997     xrc->get_end   = pnv_xive2_get_end;
1998     xrc->write_end = pnv_xive2_write_end;
1999     xrc->get_nvp   = pnv_xive2_get_nvp;
2000     xrc->write_nvp = pnv_xive2_write_nvp;
2001     xrc->get_config  = pnv_xive2_get_config;
2002     xrc->get_block_id = pnv_xive2_get_block_id;
2003 
2004     xnc->notify    = pnv_xive2_notify;
2005 
2006     xpc->match_nvt  = pnv_xive2_match_nvt;
2007     xpc->get_config = pnv_xive2_presenter_get_config;
2008 }
2009 
2010 static const TypeInfo pnv_xive2_info = {
2011     .name          = TYPE_PNV_XIVE2,
2012     .parent        = TYPE_XIVE2_ROUTER,
2013     .instance_init = pnv_xive2_instance_init,
2014     .instance_size = sizeof(PnvXive2),
2015     .class_init    = pnv_xive2_class_init,
2016     .class_size    = sizeof(PnvXive2Class),
2017     .interfaces    = (InterfaceInfo[]) {
2018         { TYPE_PNV_XSCOM_INTERFACE },
2019         { }
2020     }
2021 };
2022 
2023 static void pnv_xive2_register_types(void)
2024 {
2025     type_register_static(&pnv_xive2_info);
2026 }
2027 
2028 type_init(pnv_xive2_register_types)
2029 
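/*
 * Print the state of a valid NVP entry: its END assignment, its IPB
 * and, when the entry is HW owned, the cached CPPR and thread ID.
 */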
2030 static void xive2_nvp_pic_print_info(Xive2Nvp *nvp, uint32_t nvp_idx,
2031                                      GString *buf)
2032 {
2033     uint8_t  eq_blk = xive_get_field32(NVP2_W5_VP_END_BLOCK, nvp->w5);
2034     uint32_t eq_idx = xive_get_field32(NVP2_W5_VP_END_INDEX, nvp->w5);
2035 
2036     if (!xive2_nvp_is_valid(nvp)) {
2037         return;
2038     }
2039 
2040     g_string_append_printf(buf, "  %08x end:%02x/%04x IPB:%02x",
2041                            nvp_idx, eq_blk, eq_idx,
2042                            xive_get_field32(NVP2_W2_IPB, nvp->w2));
2043     /*
2044      * When the NVP is HW controlled, more fields are updated
2045      */
2046     if (xive2_nvp_is_hw(nvp)) {
2047         g_string_append_printf(buf, " CPPR:%02x",
2048                                xive_get_field32(NVP2_W2_CPPR, nvp->w2));
2049         if (xive2_nvp_is_co(nvp)) {
2050             g_string_append_printf(buf, " CO:%04x",
2051                                    xive_get_field32(NVP2_W1_CO_THRID, nvp->w1));
2052         }
2053     }
2054     g_string_append_c(buf, '\n');
2055 }
2056 
2057 /*
2058  * If the table is direct, we can compute the number of PQ entries
2059  * provisioned by FW.
2060  */
2061 static uint32_t pnv_xive2_nr_esbs(PnvXive2 *xive)
2062 {
2063     uint8_t blk = pnv_xive2_block_id(xive);
2064     uint64_t vsd = xive->vsds[VST_ESB][blk];
2065     uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);
2066 
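    /* Each ESB byte holds SBE_PER_BYTE (4) 2-bit PQ entries */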
2067     return (VSD_INDIRECT & vsd) ? 0 : vst_tsize * SBE_PER_BYTE;
2068 }
2069 
2070 /*
2071  * Compute the number of entries per indirect subpage.
2072  */
2073 static uint64_t pnv_xive2_vst_per_subpage(PnvXive2 *xive, uint32_t type)
2074 {
2075     uint8_t blk = pnv_xive2_block_id(xive);
2076     uint64_t vsd = xive->vsds[type][blk];
2077     const XiveVstInfo *info = &vst_infos[type];
2078     uint64_t vsd_addr;
2079     uint32_t page_shift;
2080 
2081     /* For direct tables, fake a valid value */
2082     if (!(VSD_INDIRECT & vsd)) {
2083         return 1;
2084     }
2085 
2086     /* Get the page size of the indirect table. */
2087     vsd_addr = vsd & VSD_ADDRESS_MASK;
2088     ldq_be_dma(&address_space_memory, vsd_addr, &vsd, MEMTXATTRS_UNSPECIFIED);
2089 
2090     if (!(vsd & VSD_ADDRESS_MASK)) {
2091 #ifdef XIVE2_DEBUG
2092         xive2_error(xive, "VST: invalid %s entry!?", info->name);
2093 #endif
2094         return 0;
2095     }
2096 
2097     page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
2098 
2099     if (!pnv_xive2_vst_page_size_allowed(page_shift)) {
2100         xive2_error(xive, "VST: invalid %s page shift %d", info->name,
2101                     page_shift);
2102         return 0;
2103     }
2104 
2105     return (1ull << page_shift) / info->size;
2106 }
2107 
2108 void pnv_xive2_pic_print_info(PnvXive2 *xive, Monitor *mon)
2109 {
2110     Xive2Router *xrtr = XIVE2_ROUTER(xive);
2111     uint8_t blk = pnv_xive2_block_id(xive);
2112     uint8_t chip_id = xive->chip->chip_id;
2113     uint32_t srcno0 = XIVE_EAS(blk, 0);
2114     uint32_t nr_esbs = pnv_xive2_nr_esbs(xive);
2115     Xive2Eas eas;
2116     Xive2End end;
2117     Xive2Nvp nvp;
2118     int i;
2119     uint64_t xive_nvp_per_subpage;
2120     g_autoptr(GString) buf = g_string_new("");
2121     g_autoptr(HumanReadableText) info = NULL;
2122 
2123     g_string_append_printf(buf, "XIVE[%x] Source %08x .. %08x\n",
2124                            blk, srcno0, srcno0 + nr_esbs - 1);
2125     xive_source_pic_print_info(&xive->ipi_source, srcno0, buf);
2126 
2127     g_string_append_printf(buf, "XIVE[%x] EAT %08x .. %08x\n",
2128                            blk, srcno0, srcno0 + nr_esbs - 1);
2129     for (i = 0; i < nr_esbs; i++) {
2130         if (xive2_router_get_eas(xrtr, blk, i, &eas)) {
2131             break;
2132         }
2133         if (!xive2_eas_is_masked(&eas)) {
2134             xive2_eas_pic_print_info(&eas, i, buf);
2135         }
2136     }
2137 
2138     g_string_append_printf(buf, "XIVE[%x] #%d END Escalation EAT\n",
2139                            chip_id, blk);
2140     i = 0;
2141     while (!xive2_router_get_end(xrtr, blk, i, &end)) {
2142         xive2_end_eas_pic_print_info(&end, i++, buf);
2143     }
2144 
2145     g_string_append_printf(buf, "XIVE[%x] #%d ENDT\n", chip_id, blk);
2146     i = 0;
2147     while (!xive2_router_get_end(xrtr, blk, i, &end)) {
2148         xive2_end_pic_print_info(&end, i++, buf);
2149     }
2150 
2151     g_string_append_printf(buf, "XIVE[%x] #%d NVPT %08x .. %08x\n",
2152                            chip_id, blk, 0, XIVE2_NVP_COUNT - 1);
2153     xive_nvp_per_subpage = pnv_xive2_vst_per_subpage(xive, VST_NVP);
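    /*
     * Scan the NVPT one subpage at a time: the inner loop stops at
     * the first unmapped NVP entry and the outer loop then jumps
     * ahead by one subpage worth of entries.
     */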
2154     for (i = 0; i < XIVE2_NVP_COUNT; i += xive_nvp_per_subpage) {
2155         while (!xive2_router_get_nvp(xrtr, blk, i, &nvp)) {
2156             xive2_nvp_pic_print_info(&nvp, i++, buf);
2157         }
2158     }
2159 
2160     info = human_readable_text_from_str(buf);
2161     monitor_puts(mon, info->human_readable_text);
2162 }
2163