/*
 * QEMU PowerPC XIVE interrupt controller model
 *
 * Copyright (c) 2017-2019, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/dma.h"
#include "sysemu/reset.h"
#include "monitor/monitor.h"
#include "hw/ppc/fdt.h"
#include "hw/ppc/pnv.h"
#include "hw/ppc/pnv_chip.h"
#include "hw/ppc/pnv_core.h"
#include "hw/ppc/pnv_xscom.h"
#include "hw/ppc/pnv_xive.h"
#include "hw/ppc/xive_regs.h"
#include "hw/qdev-properties.h"
#include "hw/ppc/ppc.h"
#include "trace.h"

#include <libfdt.h>

#include "pnv_xive_regs.h"

#undef XIVE_DEBUG

/*
 * Virtual structures table (VST)
 */
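/* Each SBE byte holds the 2-bit PQ state of four interrupt sources */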
#define SBE_PER_BYTE   4

typedef struct XiveVstInfo {
    const char *name;
    uint32_t    size;
    uint32_t    max_blocks;
} XiveVstInfo;

static const XiveVstInfo vst_infos[] = {
    [VST_TSEL_IVT]  = { "EAT",  sizeof(XiveEAS), 16 },
    [VST_TSEL_SBE]  = { "SBE",  1,               16 },
    [VST_TSEL_EQDT] = { "ENDT", sizeof(XiveEND), 16 },
    [VST_TSEL_VPDT] = { "VPDT", sizeof(XiveNVT), 32 },

    /*
     * Interrupt fifo backing store table (not modeled):
     *
     * 0 - IPI,
     * 1 - HWD,
     * 2 - First escalate,
     * 3 - Second escalate,
     * 4 - Redistribution,
     * 5 - IPI cascaded queue?
     */
    [VST_TSEL_IRQ]  = { "IRQ",  1,               6  },
};

#define xive_error(xive, fmt, ...)                                      \
    qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n",              \
                  (xive)->chip->chip_id, ## __VA_ARGS__);

/*
 * When PC_TCTXT_CHIPID_OVERRIDE is configured, the PC_TCTXT_CHIPID
 * field overrides the hardwired chip ID in the Powerbus operations
 * and for CAM compares.
 */
static uint8_t pnv_xive_block_id(PnvXive *xive)
{
    uint8_t blk = xive->chip->chip_id;
    uint64_t cfg_val = xive->regs[PC_TCTXT_CFG >> 3];

    if (cfg_val & PC_TCTXT_CHIPID_OVERRIDE) {
        blk = GETFIELD(PC_TCTXT_CHIPID, cfg_val);
    }

    return blk;
}

/*
 * Remote access to controllers. HW uses MMIOs. For now, a simple scan
 * of the chips is good enough.
 *
 * TODO: Block scope support
 */
static PnvXive *pnv_xive_get_remote(uint8_t blk)
{
    PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine());
    int i;

    for (i = 0; i < pnv->num_chips; i++) {
        Pnv9Chip *chip9 = PNV9_CHIP(pnv->chips[i]);
        PnvXive *xive = &chip9->xive;

        if (pnv_xive_block_id(xive) == blk) {
            return xive;
        }
    }
    return NULL;
}

/*
 * VST accessors for SBE, EAT, ENDT, NVT
 *
 * Indirect VST tables are arrays of VSDs pointing to pages of the
 * same size. Each page is a direct VST table.
 */
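
/*
 * Rough sketch of an indirect table lookup (layout illustrative):
 *
 *   VSD (indirect) --> [ VSD0 | VSD1 | ... ]  array of 8-byte VSDs
 *                          |
 *                          +--> subpage = direct table of entries
 *
 * pnv_xive_vst_addr_indirect() first loads the VSD of the subpage
 * holding 'idx' and then resolves the entry within that subpage.
 */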

#define XIVE_VSD_SIZE 8

/* Indirect page size can be 4K, 64K, 2M or 16M. */
static uint64_t pnv_xive_vst_page_size_allowed(uint32_t page_shift)
{
    return page_shift == 12 || page_shift == 16 ||
           page_shift == 21 || page_shift == 24;
}

static uint64_t pnv_xive_vst_addr_direct(PnvXive *xive, uint32_t type,
                                         uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);
    uint32_t idx_max;

    idx_max = vst_tsize / info->size - 1;
    if (idx > idx_max) {
#ifdef XIVE_DEBUG
        xive_error(xive, "VST: %s entry %x out of range [ 0 .. %x ] !?",
                   info->name, idx, idx_max);
#endif
        return 0;
    }

    return vst_addr + idx * info->size;
}

static uint64_t pnv_xive_vst_addr_indirect(PnvXive *xive, uint32_t type,
                                           uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t vsd_idx;
    uint32_t page_shift;
    uint32_t vst_per_page;

    /* Get the page size of the indirect table. */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    if (ldq_be_dma(&address_space_memory, vsd_addr, &vsd,
                   MEMTXATTRS_UNSPECIFIED)) {
        xive_error(xive, "VST: failed to access %s entry %x @0x%" PRIx64,
                   info->name, idx, vsd_addr);
        return 0;
    }

    if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE_DEBUG
        xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
#endif
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive_vst_page_size_allowed(page_shift)) {
        xive_error(xive, "VST: invalid %s page shift %d", info->name,
                   page_shift);
        return 0;
    }

    vst_per_page = (1ull << page_shift) / info->size;
    vsd_idx = idx / vst_per_page;

    /* Load the VSD we are looking for, if not already done */
    if (vsd_idx) {
        vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE;
        if (ldq_be_dma(&address_space_memory, vsd_addr, &vsd,
                       MEMTXATTRS_UNSPECIFIED)) {
            xive_error(xive, "VST: failed to access %s entry %x @0x%"
                       PRIx64, info->name, vsd_idx, vsd_addr);
            return 0;
        }

        if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE_DEBUG
            xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
#endif
            return 0;
        }

        /*
         * Check that the pages have a consistent size across the
         * indirect table
         */
        if (page_shift != GETFIELD(VSD_TSIZE, vsd) + 12) {
            xive_error(xive, "VST: %s entry %x indirect page sizes differ !?",
                       info->name, idx);
            return 0;
        }
    }

    return pnv_xive_vst_addr_direct(xive, type, vsd, (idx % vst_per_page));
}

static uint64_t pnv_xive_vst_addr(PnvXive *xive, uint32_t type, uint8_t blk,
                                  uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd;

    if (blk >= info->max_blocks) {
        xive_error(xive, "VST: invalid block id %d for VST %s %d !?",
                   blk, info->name, idx);
        return 0;
    }

    vsd = xive->vsds[type][blk];

    /* Remote VST access */
    if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) {
        xive = pnv_xive_get_remote(blk);

        return xive ? pnv_xive_vst_addr(xive, type, blk, idx) : 0;
    }

    if (VSD_INDIRECT & vsd) {
        return pnv_xive_vst_addr_indirect(xive, type, vsd, idx);
    }

    return pnv_xive_vst_addr_direct(xive, type, vsd, idx);
}

static int pnv_xive_vst_read(PnvXive *xive, uint32_t type, uint8_t blk,
                             uint32_t idx, void *data)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx);
    MemTxResult result;

    if (!addr) {
        return -1;
    }

    result = address_space_read(&address_space_memory, addr,
                                MEMTXATTRS_UNSPECIFIED, data,
                                info->size);
    if (result != MEMTX_OK) {
        xive_error(xive, "VST: read failed at @0x%" HWADDR_PRIx
                   " for VST %s %x/%x", addr, info->name, blk, idx);
        return -1;
    }
    return 0;
}

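/* Pass as 'word_number' to update all words of an entry at once */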
#define XIVE_VST_WORD_ALL -1

static int pnv_xive_vst_write(PnvXive *xive, uint32_t type, uint8_t blk,
                              uint32_t idx, void *data, uint32_t word_number)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx);
    MemTxResult result;

    if (!addr) {
        return -1;
    }

    if (word_number == XIVE_VST_WORD_ALL) {
        result = address_space_write(&address_space_memory, addr,
                                     MEMTXATTRS_UNSPECIFIED, data,
                                     info->size);
    } else {
        result = address_space_write(&address_space_memory,
                                     addr + word_number * 4,
                                     MEMTXATTRS_UNSPECIFIED,
                                     data + word_number * 4, 4);
    }

    if (result != MEMTX_OK) {
        xive_error(xive, "VST: write failed at @0x%" HWADDR_PRIx
                   " for VST %s %x/%x", addr, info->name, blk, idx);
        return -1;
    }
    return 0;
}

static int pnv_xive_get_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveEND *end)
{
    return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end);
}

static int pnv_xive_write_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                              XiveEND *end, uint8_t word_number)
{
    return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end,
                              word_number);
}

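/*
 * Cache watch facility: VC_EQC_CWATCH_SPEC selects an END entry
 * (block/index) and VC_EQC_CWATCH_DAT0-3 hold its four 64-bit words.
 * Writing DAT0 flushes the entry to the backing store and reading
 * DAT0 reloads it (see pnv_xive_ic_reg_write/read below).
 */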
static int pnv_xive_end_update(PnvXive *xive)
{
    uint8_t  blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    int i;
    uint64_t eqc_watch[4];

    for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) {
        eqc_watch[i] = cpu_to_be64(xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i]);
    }

    return pnv_xive_vst_write(xive, VST_TSEL_EQDT, blk, idx, eqc_watch,
                              XIVE_VST_WORD_ALL);
}

static void pnv_xive_end_cache_load(PnvXive *xive)
{
    uint8_t  blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint64_t eqc_watch[4] = { 0 };
    int i;

    if (pnv_xive_vst_read(xive, VST_TSEL_EQDT, blk, idx, eqc_watch)) {
        xive_error(xive, "VST: no END entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) {
        xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(eqc_watch[i]);
    }
}

static int pnv_xive_get_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveNVT *nvt)
{
    return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt);
}

static int pnv_xive_write_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                              XiveNVT *nvt, uint8_t word_number)
{
    return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt,
                              word_number);
}

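/*
 * The NVT cache watch works like the END one, with eight data
 * registers (PC_VPC_CWATCH_DAT0-7) covering the 64-byte NVT entry.
 */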
static int pnv_xive_nvt_update(PnvXive *xive)
{
    uint8_t  blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    int i;
    uint64_t vpc_watch[8];

    for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) {
        vpc_watch[i] = cpu_to_be64(xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i]);
    }

    return pnv_xive_vst_write(xive, VST_TSEL_VPDT, blk, idx, vpc_watch,
                              XIVE_VST_WORD_ALL);
}

static void pnv_xive_nvt_cache_load(PnvXive *xive)
{
    uint8_t  blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint64_t vpc_watch[8] = { 0 };
    int i;

    if (pnv_xive_vst_read(xive, VST_TSEL_VPDT, blk, idx, vpc_watch)) {
        xive_error(xive, "VST: no NVT entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) {
        xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(vpc_watch[i]);
    }
}

static int pnv_xive_get_eas(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveEAS *eas)
{
    PnvXive *xive = PNV_XIVE(xrtr);

    /*
     * EAT lookups should be local to the IC
     */
    if (pnv_xive_block_id(xive) != blk) {
        xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    return pnv_xive_vst_read(xive, VST_TSEL_IVT, blk, idx, eas);
}

static int pnv_xive_get_pq(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                           uint8_t *pq)
{
    PnvXive *xive = PNV_XIVE(xrtr);

    if (pnv_xive_block_id(xive) != blk) {
        xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    *pq = xive_source_esb_get(&xive->ipi_source, idx);
    return 0;
}

static int pnv_xive_set_pq(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                           uint8_t *pq)
{
    PnvXive *xive = PNV_XIVE(xrtr);

    if (pnv_xive_block_id(xive) != blk) {
        xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    *pq = xive_source_esb_set(&xive->ipi_source, idx, *pq);
    return 0;
}

/*
 * One bit per thread id. The first register PC_THREAD_EN_REG0 covers
 * cores 0-15 of the chip in normal mode, or cores 0-7 in fused mode.
 * The second register covers cores 16-23 (normal) or 8-11 (fused).
 */
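/*
 * In this model the enable bit index is simply PIR modulo 64, with
 * fused cores 8 and above spilling into PC_THREAD_EN_REG1.
 */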
static bool pnv_xive_is_cpu_enabled(PnvXive *xive, PowerPCCPU *cpu)
{
    int pir = ppc_cpu_pir(cpu);
    uint32_t fc = PNV9_PIR2FUSEDCORE(pir);
    uint64_t reg = fc < 8 ? PC_THREAD_EN_REG0 : PC_THREAD_EN_REG1;
    uint32_t bit = pir & 0x3f;

    return xive->regs[reg >> 3] & PPC_BIT(bit);
}

static int pnv_xive_match_nvt(XivePresenter *xptr, uint8_t format,
                              uint8_t nvt_blk, uint32_t nvt_idx,
                              bool cam_ignore, uint8_t priority,
                              uint32_t logic_serv, XiveTCTXMatch *match)
{
    PnvXive *xive = PNV_XIVE(xptr);
    PnvChip *chip = xive->chip;
    int count = 0;
    int i, j;

    for (i = 0; i < chip->nr_cores; i++) {
        PnvCore *pc = chip->cores[i];
        CPUCore *cc = CPU_CORE(pc);

        for (j = 0; j < cc->nr_threads; j++) {
            PowerPCCPU *cpu = pc->threads[j];
            XiveTCTX *tctx;
            int ring;

            if (!pnv_xive_is_cpu_enabled(xive, cpu)) {
                continue;
            }

            tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

            /*
             * Check the thread context CAM lines and record matches.
             */
            ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk,
                                             nvt_idx, cam_ignore, logic_serv);
            /*
             * Save the context and follow on to catch duplicates,
             * which we don't support yet.
             */
            if (ring != -1) {
                if (match->tctx) {
                    qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a "
                                  "thread context NVT %x/%x\n",
                                  nvt_blk, nvt_idx);
                    return -1;
                }

                match->ring = ring;
                match->tctx = tctx;
                count++;
            }
        }
    }

    return count;
}

static uint32_t pnv_xive_presenter_get_config(XivePresenter *xptr)
{
    uint32_t cfg = 0;

    /* TIMA GEN1 is all P9 knows */
    cfg |= XIVE_PRESENTER_GEN1_TIMA_OS;

    return cfg;
}

static uint8_t pnv_xive_get_block_id(XiveRouter *xrtr)
{
    return pnv_xive_block_id(PNV_XIVE(xrtr));
}

/*
 * The TIMA MMIO space is shared among the chips. To identify the
 * chip from which the access is being done, we extract the chip id
 * from the PIR.
 */
static PnvXive *pnv_xive_tm_get_xive(PowerPCCPU *cpu)
{
    int pir = ppc_cpu_pir(cpu);
    XivePresenter *xptr = XIVE_TCTX(pnv_cpu_state(cpu)->intc)->xptr;
    PnvXive *xive = PNV_XIVE(xptr);

    if (!pnv_xive_is_cpu_enabled(xive, cpu)) {
        xive_error(xive, "IC: CPU %x is not enabled", pir);
    }
    return xive;
}

/*
 * The internal sources (IPIs) of the interrupt controller have no
 * knowledge of the XIVE chip on which they reside. Encode the block
 * id in the source interrupt number before forwarding the source
 * event notification to the Router. This is required on a multichip
 * system.
 */
static void pnv_xive_notify(XiveNotifier *xn, uint32_t srcno, bool pq_checked)
{
    PnvXive *xive = PNV_XIVE(xn);
    uint8_t blk = pnv_xive_block_id(xive);

    xive_router_notify(xn, XIVE_EAS(blk, srcno), pq_checked);
}

/*
 * XIVE helpers
 */

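/*
 * The VC and PC window sizes derive from the BAR mask registers:
 * the two's complement of the mask (~m + 1) yields the window size,
 * the mask covering the high-order address bits.
 */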
static uint64_t pnv_xive_vc_size(PnvXive *xive)
{
    return (~xive->regs[CQ_VC_BARM >> 3] + 1) & CQ_VC_BARM_MASK;
}

static uint64_t pnv_xive_edt_shift(PnvXive *xive)
{
    return ctz64(pnv_xive_vc_size(xive) / XIVE_TABLE_EDT_MAX);
}

static uint64_t pnv_xive_pc_size(PnvXive *xive)
{
    return (~xive->regs[CQ_PC_BARM >> 3] + 1) & CQ_PC_BARM_MASK;
}

static uint32_t pnv_xive_nr_ipis(PnvXive *xive, uint8_t blk)
{
    uint64_t vsd = xive->vsds[VST_TSEL_SBE][blk];
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);

    return VSD_INDIRECT & vsd ? 0 : vst_tsize * SBE_PER_BYTE;
}

/*
 * Compute the number of entries per indirect subpage.
 */
static uint64_t pnv_xive_vst_per_subpage(PnvXive *xive, uint32_t type)
{
    uint8_t blk = pnv_xive_block_id(xive);
    uint64_t vsd = xive->vsds[type][blk];
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t page_shift;

    /* For direct tables, fake a valid value */
    if (!(VSD_INDIRECT & vsd)) {
        return 1;
    }

    /* Get the page size of the indirect table. */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    if (ldq_be_dma(&address_space_memory, vsd_addr, &vsd,
                   MEMTXATTRS_UNSPECIFIED)) {
        xive_error(xive, "VST: failed to access %s entry @0x%" PRIx64,
                   info->name, vsd_addr);
        return 0;
    }

    if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE_DEBUG
        xive_error(xive, "VST: invalid %s entry !?", info->name);
#endif
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive_vst_page_size_allowed(page_shift)) {
        xive_error(xive, "VST: invalid %s page shift %d", info->name,
                   page_shift);
        return 0;
    }

    return (1ull << page_shift) / info->size;
}

/*
 * EDT Table
 *
 * The Virtualization Controller MMIO region containing the IPI ESB
 * pages and END ESB pages is sub-divided into "sets" which map
 * portions of the VC region to the different ESB pages. It is
 * configured at runtime through the EDT "Domain Table" to let the
 * firmware decide how to split the VC address space between IPI ESB
 * pages and END ESB pages.
 */
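
/*
 * Illustratively: the VC region is carved into XIVE_TABLE_EDT_MAX
 * equal sets of 1ull << pnv_xive_edt_shift() bytes each. Firmware
 * assigns each set to IPI or END ESB pages through CQ_TAR/CQ_TDR,
 * and pnv_xive_edt_offset() remaps a VC offset into the matching
 * contiguous IPI or END space.
 */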

/*
 * Computes the overall size of the IPI or the END ESB pages
 */
static uint64_t pnv_xive_edt_size(PnvXive *xive, uint64_t type)
{
    uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive);
    uint64_t size = 0;
    int i;

    for (i = 0; i < XIVE_TABLE_EDT_MAX; i++) {
        uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]);

        if (edt_type == type) {
            size += edt_size;
        }
    }

    return size;
}

/*
 * Maps an offset of the VC region in the IPI or END region using the
 * layout defined by the EDT "Domain Table".
 */
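/*
 * For instance (illustrative), with sets [IPI, END, IPI, ...], an
 * offset falling in set 2 (an IPI set) skips the END set below it,
 * so the IPI space offset is vc_offset - edt_size.
 */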
static uint64_t pnv_xive_edt_offset(PnvXive *xive, uint64_t vc_offset,
                                    uint64_t type)
{
    int i;
    uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive);
    uint64_t edt_offset = vc_offset;

    for (i = 0; i < XIVE_TABLE_EDT_MAX && (i * edt_size) < vc_offset; i++) {
        uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]);

        if (edt_type != type) {
            edt_offset -= edt_size;
        }
    }

    return edt_offset;
}

static void pnv_xive_edt_resize(PnvXive *xive)
{
    uint64_t ipi_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_IPI);
    uint64_t end_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_EQ);

    memory_region_set_size(&xive->ipi_edt_mmio, ipi_edt_size);
    memory_region_add_subregion(&xive->ipi_mmio, 0, &xive->ipi_edt_mmio);

    memory_region_set_size(&xive->end_edt_mmio, end_edt_size);
    memory_region_add_subregion(&xive->end_mmio, 0, &xive->end_edt_mmio);
}

/*
 * XIVE Table configuration. Only EDT is supported.
 */
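/*
 * Firmware programs a table by selecting it in CQ_TAR (optionally
 * with CQ_TAR_TBL_AUTOINC to advance the index on each store) and
 * then writing one entry per store to CQ_TDR.
 */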
static int pnv_xive_table_set_data(PnvXive *xive, uint64_t val)
{
    uint64_t tsel = xive->regs[CQ_TAR >> 3] & CQ_TAR_TSEL;
    uint8_t tsel_index = GETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3]);
    uint64_t *xive_table;
    uint8_t max_index;

    switch (tsel) {
    case CQ_TAR_TSEL_BLK:
        max_index = ARRAY_SIZE(xive->blk);
        xive_table = xive->blk;
        break;
    case CQ_TAR_TSEL_MIG:
        max_index = ARRAY_SIZE(xive->mig);
        xive_table = xive->mig;
        break;
    case CQ_TAR_TSEL_EDT:
        max_index = ARRAY_SIZE(xive->edt);
        xive_table = xive->edt;
        break;
    case CQ_TAR_TSEL_VDT:
        max_index = ARRAY_SIZE(xive->vdt);
        xive_table = xive->vdt;
        break;
    default:
        xive_error(xive, "IC: invalid table %d", (int) tsel);
        return -1;
    }

    if (tsel_index >= max_index) {
        xive_error(xive, "IC: invalid index %d", (int) tsel_index);
        return -1;
    }

    xive_table[tsel_index] = val;

    if (xive->regs[CQ_TAR >> 3] & CQ_TAR_TBL_AUTOINC) {
        xive->regs[CQ_TAR >> 3] =
            SETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3], ++tsel_index);
    }

    /*
     * EDT configuration is complete. Resize the MMIO windows exposing
     * the IPI and the END ESBs in the VC region.
     */
    if (tsel == CQ_TAR_TSEL_EDT && tsel_index == ARRAY_SIZE(xive->edt)) {
        pnv_xive_edt_resize(xive);
    }

    return 0;
}

/*
 * Virtual Structure Tables (VST) configuration
 */
static void pnv_xive_vst_set_exclusive(PnvXive *xive, uint8_t type,
                                       uint8_t blk, uint64_t vsd)
{
    XiveENDSource *end_xsrc = &xive->end_source;
    XiveSource *xsrc = &xive->ipi_source;
    const XiveVstInfo *info = &vst_infos[type];
    uint32_t page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
    uint64_t vst_tsize = 1ull << page_shift;
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    /* Basic checks */

    if (VSD_INDIRECT & vsd) {
        if (!(xive->regs[VC_GLOBAL_CONFIG >> 3] & VC_GCONF_INDIRECT)) {
            xive_error(xive, "VST: %s indirect tables are not enabled",
                       info->name);
            return;
        }

        if (!pnv_xive_vst_page_size_allowed(page_shift)) {
            xive_error(xive, "VST: invalid %s page shift %d", info->name,
                       page_shift);
            return;
        }
    }

    if (!QEMU_IS_ALIGNED(vst_addr, 1ull << page_shift)) {
        xive_error(xive, "VST: %s table address 0x%"PRIx64" is not aligned with"
                   " page shift %d", info->name, vst_addr, page_shift);
        return;
    }

    /* Record the table configuration (in SRAM on HW) */
    xive->vsds[type][blk] = vsd;

    /* Now tune the models with the configuration provided by the FW */

    switch (type) {
    case VST_TSEL_IVT:  /* Nothing to be done */
        break;

    case VST_TSEL_EQDT:
        /*
         * Backing store pages for the END.
         *
         * If the table is direct, we can compute the number of PQ
         * entries provisioned by FW (such as skiboot) and resize the
         * END ESB window accordingly.
         */
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&end_xsrc->esb_mmio, (vst_tsize / info->size)
                                   * (1ull << xsrc->esb_shift));
        }
        memory_region_add_subregion(&xive->end_edt_mmio, 0,
                                    &end_xsrc->esb_mmio);
        break;

    case VST_TSEL_SBE:
        /*
         * Backing store pages for the source PQ bits. The model does
         * not use these PQ bits backed in RAM because the XiveSource
         * model has its own.
         *
         * If the table is direct, we can compute the number of PQ
         * entries provisioned by FW (such as skiboot) and resize the
         * ESB window accordingly.
         */
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&xsrc->esb_mmio, vst_tsize * SBE_PER_BYTE
                                   * (1ull << xsrc->esb_shift));
        }
        memory_region_add_subregion(&xive->ipi_edt_mmio, 0, &xsrc->esb_mmio);
        break;

    case VST_TSEL_VPDT: /* Not modeled */
    case VST_TSEL_IRQ:  /* Not modeled */
        /*
         * These tables contain the backing store pages for the
         * interrupt fifos of the VC sub-engine in case of overflow.
         */
        break;

    default:
        g_assert_not_reached();
    }
}

/*
 * Both PC and VC sub-engines are configured, as each uses the Virtual
 * Structure Tables: SBE, EAS, END and NVT.
 */
static void pnv_xive_vst_set_data(PnvXive *xive, uint64_t vsd, bool pc_engine)
{
    uint8_t mode = GETFIELD(VSD_MODE, vsd);
    uint8_t type = GETFIELD(VST_TABLE_SELECT,
                            xive->regs[VC_VSD_TABLE_ADDR >> 3]);
    uint8_t blk = GETFIELD(VST_TABLE_BLOCK,
                           xive->regs[VC_VSD_TABLE_ADDR >> 3]);
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    if (type > VST_TSEL_IRQ) {
        xive_error(xive, "VST: invalid table type %d", type);
        return;
    }

    if (blk >= vst_infos[type].max_blocks) {
        xive_error(xive, "VST: invalid block id %d for"
                   " %s table", blk, vst_infos[type].name);
        return;
    }

    /*
     * Only take the VC sub-engine configuration into account because
     * the XiveRouter model combines both VC and PC sub-engines
     */
    if (pc_engine) {
        return;
    }

    if (!vst_addr) {
        xive_error(xive, "VST: invalid %s table address", vst_infos[type].name);
        return;
    }

    switch (mode) {
    case VSD_MODE_FORWARD:
        xive->vsds[type][blk] = vsd;
        break;

    case VSD_MODE_EXCLUSIVE:
        pnv_xive_vst_set_exclusive(xive, type, blk, vsd);
        break;

    default:
        xive_error(xive, "VST: unsupported table mode %d", mode);
        return;
    }
}

/*
 * Interrupt controller MMIO region. The layout is compatible between
 * 4K and 64K pages:
 *
 * Page 0           sub-engine BARs
 *  0x000 - 0x3FF   IC registers
 *  0x400 - 0x7FF   PC registers
 *  0x800 - 0xFFF   VC registers
 *
 * Page 1           Notify page (writes only)
 *  0x000 - 0x7FF   HW interrupt triggers (PSI, PHB)
 *  0x800 - 0xFFF   forwards and syncs
 *
 * Page 2           LSI Trigger page (writes only) (not modeled)
 * Page 3           LSI SB EOI page (reads only) (not modeled)
 *
 * Page 4-7         indirect TIMA
 */

/*
 * IC - registers MMIO
 */
static void pnv_xive_ic_reg_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    MemoryRegion *sysmem = get_system_memory();
    uint32_t reg = offset >> 3;
    bool is_chip0 = xive->chip->chip_id == 0;

    switch (offset) {

    /*
     * XIVE CQ (PowerBus bridge) settings
     */
    case CQ_MSGSND:     /* msgsnd for doorbells */
    case CQ_FIRMASK_OR: /* FIR error reporting */
        break;
    case CQ_PBI_CTL:
        if (val & CQ_PBI_PC_64K) {
            xive->pc_shift = 16;
        }
        if (val & CQ_PBI_VC_64K) {
            xive->vc_shift = 16;
        }
        break;
    case CQ_CFG_PB_GEN: /* PowerBus General Configuration */
        /*
         * TODO: CQ_INT_ADDR_OPT for 1-block-per-chip mode
         */
        break;

    /*
     * XIVE Virtualization Controller settings
     */
    case VC_GLOBAL_CONFIG:
        break;

    /*
     * XIVE Presenter Controller settings
     */
    case PC_GLOBAL_CONFIG:
        /*
         * PC_GCONF_CHIPID_OVR
         *   Overrides Int command Chip ID with the Chip ID field (DEBUG)
         */
        break;
    case PC_TCTXT_CFG:
        /*
         * TODO: block group support
         */
        break;
    case PC_TCTXT_TRACK:
        /*
         * PC_TCTXT_TRACK_EN:
         *   enable block tracking and exchange of block ownership
         *   information between Interrupt controllers
         */
        break;

    /*
     * Misc settings
     */
    case VC_SBC_CONFIG: /* Store EOI configuration */
        /*
         * Configure store EOI if required by firmware (skiboot has
         * removed support recently though)
         */
        if (val & (VC_SBC_CONF_CPLX_CIST | VC_SBC_CONF_CIST_BOTH)) {
            xive->ipi_source.esb_flags |= XIVE_SRC_STORE_EOI;
        }
        break;

    case VC_EQC_CONFIG: /* TODO: silent escalation */
    case VC_AIB_TX_ORDER_TAG2: /* relax ordering */
        break;

    /*
     * XIVE BAR settings (XSCOM only)
     */
    case CQ_RST_CTL:
        /* bit 4: resets all BAR registers */
        break;

    case CQ_IC_BAR: /* IC BAR. 8 pages */
        xive->ic_shift = val & CQ_IC_BAR_64K ? 16 : 12;
        if (!(val & CQ_IC_BAR_VALID)) {
            xive->ic_base = 0;
            if (xive->regs[reg] & CQ_IC_BAR_VALID) {
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_reg_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_notify_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_lsi_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->tm_indirect_mmio);

                memory_region_del_subregion(sysmem, &xive->ic_mmio);
            }
        } else {
            xive->ic_base = val & ~(CQ_IC_BAR_VALID | CQ_IC_BAR_64K);
            if (!(xive->regs[reg] & CQ_IC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->ic_base,
                                            &xive->ic_mmio);

                memory_region_add_subregion(&xive->ic_mmio, 0,
                                            &xive->ic_reg_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            1ul << xive->ic_shift,
                                            &xive->ic_notify_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            2ul << xive->ic_shift,
                                            &xive->ic_lsi_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            4ull << xive->ic_shift,
                                            &xive->tm_indirect_mmio);
            }
        }
        break;

    case CQ_TM1_BAR: /* TM BAR. 4 pages. Map only once */
    case CQ_TM2_BAR: /* second TM BAR. for hotplug. Not modeled */
        xive->tm_shift = val & CQ_TM_BAR_64K ? 16 : 12;
        if (!(val & CQ_TM_BAR_VALID)) {
            xive->tm_base = 0;
            if (xive->regs[reg] & CQ_TM_BAR_VALID && is_chip0) {
                memory_region_del_subregion(sysmem, &xive->tm_mmio);
            }
        } else {
            xive->tm_base = val & ~(CQ_TM_BAR_VALID | CQ_TM_BAR_64K);
            if (!(xive->regs[reg] & CQ_TM_BAR_VALID) && is_chip0) {
                memory_region_add_subregion(sysmem, xive->tm_base,
                                            &xive->tm_mmio);
            }
        }
        break;

    case CQ_PC_BARM:
        xive->regs[reg] = val;
        memory_region_set_size(&xive->pc_mmio, pnv_xive_pc_size(xive));
        break;
    case CQ_PC_BAR: /* From 32M to 512G */
        if (!(val & CQ_PC_BAR_VALID)) {
            xive->pc_base = 0;
            if (xive->regs[reg] & CQ_PC_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->pc_mmio);
            }
        } else {
            xive->pc_base = val & ~(CQ_PC_BAR_VALID);
            if (!(xive->regs[reg] & CQ_PC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->pc_base,
                                            &xive->pc_mmio);
            }
        }
        break;

    case CQ_VC_BARM:
        xive->regs[reg] = val;
        memory_region_set_size(&xive->vc_mmio, pnv_xive_vc_size(xive));
        break;
    case CQ_VC_BAR: /* From 64M to 4TB */
        if (!(val & CQ_VC_BAR_VALID)) {
            xive->vc_base = 0;
            if (xive->regs[reg] & CQ_VC_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->vc_mmio);
            }
        } else {
            xive->vc_base = val & ~(CQ_VC_BAR_VALID);
            if (!(xive->regs[reg] & CQ_VC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->vc_base,
                                            &xive->vc_mmio);
            }
        }
        break;

    /*
     * XIVE Table settings.
     */
    case CQ_TAR: /* Table Address */
        break;
    case CQ_TDR: /* Table Data */
        pnv_xive_table_set_data(xive, val);
        break;

    /*
     * XIVE VC & PC Virtual Structure Table settings
     */
    case VC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_ADDR: /* Virtual table selector */
        break;
    case VC_VSD_TABLE_DATA: /* Virtual table setting */
    case PC_VSD_TABLE_DATA:
        pnv_xive_vst_set_data(xive, val, offset == PC_VSD_TABLE_DATA);
        break;

    /*
     * Interrupt fifo overflow in memory backing store (Not modeled)
     */
    case VC_IRQ_CONFIG_IPI:
    case VC_IRQ_CONFIG_HW:
    case VC_IRQ_CONFIG_CASCADE1:
    case VC_IRQ_CONFIG_CASCADE2:
    case VC_IRQ_CONFIG_REDIST:
    case VC_IRQ_CONFIG_IPI_CASC:
        break;

    /*
     * XIVE hardware thread enablement
     */
    case PC_THREAD_EN_REG0: /* Physical Thread Enable */
    case PC_THREAD_EN_REG1: /* Physical Thread Enable (fused core) */
        break;

    case PC_THREAD_EN_REG0_SET:
        xive->regs[PC_THREAD_EN_REG0 >> 3] |= val;
        break;
    case PC_THREAD_EN_REG1_SET:
        xive->regs[PC_THREAD_EN_REG1 >> 3] |= val;
        break;
    case PC_THREAD_EN_REG0_CLR:
        xive->regs[PC_THREAD_EN_REG0 >> 3] &= ~val;
        break;
    case PC_THREAD_EN_REG1_CLR:
        xive->regs[PC_THREAD_EN_REG1 >> 3] &= ~val;
        break;

    /*
     * Indirect TIMA access set up. Defines the PIR of the HW thread
     * to use.
     */
    case PC_TCTXT_INDIR0 ... PC_TCTXT_INDIR3:
        break;

    /*
     * XIVE PC & VC cache updates for EAS, NVT and END
     */
    case VC_IVC_SCRUB_MASK:
    case VC_IVC_SCRUB_TRIG:
        break;

    case VC_EQC_CWATCH_SPEC:
        val &= ~VC_EQC_CWATCH_CONFLICT; /* HW resets this bit */
        break;
    case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
        break;
    case VC_EQC_CWATCH_DAT0:
        /* writing to DATA0 triggers the cache write */
        xive->regs[reg] = val;
        pnv_xive_end_update(xive);
        break;
    case VC_EQC_SCRUB_MASK:
    case VC_EQC_SCRUB_TRIG:
        /*
         * The scrubbing registers flush the cache in RAM and can also
         * invalidate.
         */
        break;

    case PC_VPC_CWATCH_SPEC:
        val &= ~PC_VPC_CWATCH_CONFLICT; /* HW resets this bit */
        break;
    case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
        break;
    case PC_VPC_CWATCH_DAT0:
        /* writing to DATA0 triggers the cache write */
        xive->regs[reg] = val;
        pnv_xive_nvt_update(xive);
        break;
    case PC_VPC_SCRUB_MASK:
    case PC_VPC_SCRUB_TRIG:
        /*
         * The scrubbing registers flush the cache in RAM and can also
         * invalidate.
         */
        break;

    /*
     * XIVE PC & VC cache invalidation
     */
    case PC_AT_KILL:
        break;
    case VC_AT_MACRO_KILL:
        break;
    case PC_AT_KILL_MASK:
    case VC_AT_MACRO_KILL_MASK:
        break;

    default:
        xive_error(xive, "IC: invalid write to reg=0x%"HWADDR_PRIx, offset);
        return;
    }

    xive->regs[reg] = val;
}

static uint64_t pnv_xive_ic_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t val = 0;
    uint32_t reg = offset >> 3;

    switch (offset) {
    case CQ_CFG_PB_GEN:
    case CQ_IC_BAR:
    case CQ_TM1_BAR:
    case CQ_TM2_BAR:
    case CQ_PC_BAR:
    case CQ_PC_BARM:
    case CQ_VC_BAR:
    case CQ_VC_BARM:
    case CQ_TAR:
    case CQ_TDR:
    case CQ_PBI_CTL:

    case PC_TCTXT_CFG:
    case PC_TCTXT_TRACK:
    case PC_TCTXT_INDIR0:
    case PC_TCTXT_INDIR1:
    case PC_TCTXT_INDIR2:
    case PC_TCTXT_INDIR3:
    case PC_GLOBAL_CONFIG:

    case PC_VPC_SCRUB_MASK:

    case VC_GLOBAL_CONFIG:
    case VC_AIB_TX_ORDER_TAG2:

    case VC_IRQ_CONFIG_IPI:
    case VC_IRQ_CONFIG_HW:
    case VC_IRQ_CONFIG_CASCADE1:
    case VC_IRQ_CONFIG_CASCADE2:
    case VC_IRQ_CONFIG_REDIST:
    case VC_IRQ_CONFIG_IPI_CASC:

    case VC_EQC_SCRUB_MASK:
    case VC_IVC_SCRUB_MASK:
    case VC_SBC_CONFIG:
    case VC_AT_MACRO_KILL_MASK:
    case VC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_ADDR:
    case VC_VSD_TABLE_DATA:
    case PC_VSD_TABLE_DATA:
    case PC_THREAD_EN_REG0:
    case PC_THREAD_EN_REG1:
        val = xive->regs[reg];
        break;

    /*
     * XIVE hardware thread enablement
     */
    case PC_THREAD_EN_REG0_SET:
    case PC_THREAD_EN_REG0_CLR:
        val = xive->regs[PC_THREAD_EN_REG0 >> 3];
        break;
    case PC_THREAD_EN_REG1_SET:
    case PC_THREAD_EN_REG1_CLR:
        val = xive->regs[PC_THREAD_EN_REG1 >> 3];
        break;

    case CQ_MSGSND: /* Identifies which cores have msgsnd enabled. */
        val = 0xffffff0000000000;
        break;

    /*
     * XIVE PC & VC cache updates for EAS, NVT and END
     */
    case VC_EQC_CWATCH_SPEC:
        xive->regs[reg] &= ~(VC_EQC_CWATCH_FULL | VC_EQC_CWATCH_CONFLICT);
        val = xive->regs[reg];
        break;
    case VC_EQC_CWATCH_DAT0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        pnv_xive_end_cache_load(xive);
        val = xive->regs[reg];
        break;
    case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
        val = xive->regs[reg];
        break;

    case PC_VPC_CWATCH_SPEC:
        xive->regs[reg] &= ~(PC_VPC_CWATCH_FULL | PC_VPC_CWATCH_CONFLICT);
        val = xive->regs[reg];
        break;
    case PC_VPC_CWATCH_DAT0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        pnv_xive_nvt_cache_load(xive);
        val = xive->regs[reg];
        break;
    case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
        val = xive->regs[reg];
        break;

    case PC_VPC_SCRUB_TRIG:
    case VC_IVC_SCRUB_TRIG:
    case VC_EQC_SCRUB_TRIG:
        xive->regs[reg] &= ~VC_SCRUB_VALID;
        val = xive->regs[reg];
        break;

    /*
     * XIVE PC & VC cache invalidation
     */
    case PC_AT_KILL:
        xive->regs[reg] &= ~PC_AT_KILL_VALID;
        val = xive->regs[reg];
        break;
    case VC_AT_MACRO_KILL:
        xive->regs[reg] &= ~VC_KILL_VALID;
        val = xive->regs[reg];
        break;

    /*
     * XIVE synchronisation
     */
    case VC_EQC_CONFIG:
        val = VC_EQC_SYNC_MASK;
        break;

    default:
        xive_error(xive, "IC: invalid read reg=0x%"HWADDR_PRIx, offset);
    }

    return val;
}

static const MemoryRegionOps pnv_xive_ic_reg_ops = {
    .read = pnv_xive_ic_reg_read,
    .write = pnv_xive_ic_reg_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * IC - Notify MMIO port page (write only)
 */
#define PNV_XIVE_FORWARD_IPI        0x800 /* Forward IPI */
#define PNV_XIVE_FORWARD_HW         0x880 /* Forward HW */
#define PNV_XIVE_FORWARD_OS_ESC     0x900 /* Forward OS escalation */
#define PNV_XIVE_FORWARD_HW_ESC     0x980 /* Forward Hyp escalation */
#define PNV_XIVE_FORWARD_REDIS      0xa00 /* Forward Redistribution */
#define PNV_XIVE_RESERVED5          0xa80 /* Cache line 5 PowerBUS operation */
#define PNV_XIVE_RESERVED6          0xb00 /* Cache line 6 PowerBUS operation */
#define PNV_XIVE_RESERVED7          0xb80 /* Cache line 7 PowerBUS operation */

/* VC synchronisation */
#define PNV_XIVE_SYNC_IPI           0xc00 /* Sync IPI */
#define PNV_XIVE_SYNC_HW            0xc80 /* Sync HW */
#define PNV_XIVE_SYNC_OS_ESC        0xd00 /* Sync OS escalation */
#define PNV_XIVE_SYNC_HW_ESC        0xd80 /* Sync Hyp escalation */
#define PNV_XIVE_SYNC_REDIS         0xe00 /* Sync Redistribution */

/* PC synchronisation */
#define PNV_XIVE_SYNC_PULL          0xe80 /* Sync pull context */
#define PNV_XIVE_SYNC_PUSH          0xf00 /* Sync push context */
#define PNV_XIVE_SYNC_VPC           0xf80 /* Sync remove VPC store */

static void pnv_xive_ic_hw_trigger(PnvXive *xive, hwaddr addr, uint64_t val)
{
    uint8_t blk;
    uint32_t idx;

    trace_pnv_xive_ic_hw_trigger(addr, val);

    if (val & XIVE_TRIGGER_END) {
        xive_error(xive, "IC: END trigger at @0x%"HWADDR_PRIx" data 0x%"PRIx64,
                   addr, val);
        return;
    }

    /*
     * Forward the source event notification directly to the Router.
     * The source interrupt number should already be correctly encoded
     * with the chip block id by the sending device (PHB, PSI).
     */
    blk = XIVE_EAS_BLOCK(val);
    idx = XIVE_EAS_INDEX(val);

    xive_router_notify(XIVE_NOTIFIER(xive), XIVE_EAS(blk, idx),
                       !!(val & XIVE_TRIGGER_PQ));
}

static void pnv_xive_ic_notify_write(void *opaque, hwaddr addr, uint64_t val,
                                     unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    /* VC: HW triggers */
    switch (addr) {
    case 0x000 ... 0x7FF:
        pnv_xive_ic_hw_trigger(opaque, addr, val);
        break;

    /* VC: Forwarded IRQs */
    case PNV_XIVE_FORWARD_IPI:
    case PNV_XIVE_FORWARD_HW:
    case PNV_XIVE_FORWARD_OS_ESC:
    case PNV_XIVE_FORWARD_HW_ESC:
    case PNV_XIVE_FORWARD_REDIS:
        /* TODO: forwarded IRQs. Should be like HW triggers */
        xive_error(xive, "IC: forwarded at @0x%"HWADDR_PRIx" IRQ 0x%"PRIx64,
                   addr, val);
        break;

    /* VC syncs */
    case PNV_XIVE_SYNC_IPI:
    case PNV_XIVE_SYNC_HW:
    case PNV_XIVE_SYNC_OS_ESC:
    case PNV_XIVE_SYNC_HW_ESC:
    case PNV_XIVE_SYNC_REDIS:
        break;

    /* PC syncs */
    case PNV_XIVE_SYNC_PULL:
    case PNV_XIVE_SYNC_PUSH:
    case PNV_XIVE_SYNC_VPC:
        break;

    default:
        xive_error(xive, "IC: invalid notify write @%"HWADDR_PRIx, addr);
    }
}

static uint64_t pnv_xive_ic_notify_read(void *opaque, hwaddr addr,
                                        unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    /* loads are invalid */
    xive_error(xive, "IC: invalid notify read @%"HWADDR_PRIx, addr);
    return -1;
}

static const MemoryRegionOps pnv_xive_ic_notify_ops = {
    .read = pnv_xive_ic_notify_read,
    .write = pnv_xive_ic_notify_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * IC - LSI MMIO handlers (not modeled)
 */

static void pnv_xive_ic_lsi_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "IC: LSI invalid write @%"HWADDR_PRIx, addr);
}

static uint64_t pnv_xive_ic_lsi_read(void *opaque, hwaddr addr, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "IC: LSI invalid read @%"HWADDR_PRIx, addr);
    return -1;
}

static const MemoryRegionOps pnv_xive_ic_lsi_ops = {
    .read = pnv_xive_ic_lsi_read,
    .write = pnv_xive_ic_lsi_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * IC - Indirect TIMA MMIO handlers
 */

/*
 * When the TIMA is accessed from the indirect page, the thread id of
 * the target CPU is configured in the PC_TCTXT_INDIR0 register before
 * use. This is also used for resets and for debug purposes.
 */
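/*
 * A typical (illustrative) firmware sequence:
 *
 *   1. write PC_TCTXT_INDIR0 with PC_TCTXT_INDIR_VALID and the
 *      target thread id
 *   2. access the indirect TIMA pages (pages 4-7 of the IC BAR)
 *   3. invalidate PC_TCTXT_INDIR0
 */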
static XiveTCTX *pnv_xive_get_indirect_tctx(PnvXive *xive)
{
    PnvChip *chip = xive->chip;
    uint64_t tctxt_indir = xive->regs[PC_TCTXT_INDIR0 >> 3];
    PowerPCCPU *cpu = NULL;
    int pir;

    if (!(tctxt_indir & PC_TCTXT_INDIR_VALID)) {
        xive_error(xive, "IC: no indirect TIMA access in progress");
        return NULL;
    }

    pir = (chip->chip_id << 8) | GETFIELD(PC_TCTXT_INDIR_THRDID, tctxt_indir);
    cpu = pnv_chip_find_cpu(chip, pir);
    if (!cpu) {
        xive_error(xive, "IC: invalid PIR %x for indirect access", pir);
        return NULL;
    }

    /* Check that HW thread is XIVE enabled */
    if (!pnv_xive_is_cpu_enabled(xive, cpu)) {
        xive_error(xive, "IC: CPU %x is not enabled", pir);
    }

    return XIVE_TCTX(pnv_cpu_state(cpu)->intc);
}

static void xive_tm_indirect_write(void *opaque, hwaddr offset,
                                   uint64_t value, unsigned size)
{
    XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));

    xive_tctx_tm_write(XIVE_PRESENTER(opaque), tctx, offset, value, size);
}

static uint64_t xive_tm_indirect_read(void *opaque, hwaddr offset,
                                      unsigned size)
{
    XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));

    return xive_tctx_tm_read(XIVE_PRESENTER(opaque), tctx, offset, size);
}

static const MemoryRegionOps xive_tm_indirect_ops = {
    .read = xive_tm_indirect_read,
    .write = xive_tm_indirect_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static void pnv_xive_tm_write(void *opaque, hwaddr offset,
                              uint64_t value, unsigned size)
{
    PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
    PnvXive *xive = pnv_xive_tm_get_xive(cpu);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

    xive_tctx_tm_write(XIVE_PRESENTER(xive), tctx, offset, value, size);
}

static uint64_t pnv_xive_tm_read(void *opaque, hwaddr offset, unsigned size)
{
    PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
    PnvXive *xive = pnv_xive_tm_get_xive(cpu);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

    return xive_tctx_tm_read(XIVE_PRESENTER(xive), tctx, offset, size);
}

const MemoryRegionOps pnv_xive_tm_ops = {
    .read = pnv_xive_tm_read,
    .write = pnv_xive_tm_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

/*
 * Interrupt controller XSCOM region.
 */
static uint64_t pnv_xive_xscom_read(void *opaque, hwaddr addr, unsigned size)
{
    switch (addr >> 3) {
    case X_VC_EQC_CONFIG:
        /* FIXME (skiboot): This is the only XSCOM load. Bizarre. */
        return VC_EQC_SYNC_MASK;
    default:
        return pnv_xive_ic_reg_read(opaque, addr, size);
    }
}

static void pnv_xive_xscom_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
    pnv_xive_ic_reg_write(opaque, addr, val, size);
}

static const MemoryRegionOps pnv_xive_xscom_ops = {
    .read = pnv_xive_xscom_read,
    .write = pnv_xive_xscom_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    }
};

/*
 * Virtualization Controller MMIO region containing the IPI and END ESB pages
 */
static uint64_t pnv_xive_vc_read(void *opaque, hwaddr offset,
                                 unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
    uint64_t edt_type = 0;
    uint64_t edt_offset;
    MemTxResult result;
    AddressSpace *edt_as = NULL;
    uint64_t ret = -1;

    if (edt_index < XIVE_TABLE_EDT_MAX) {
        edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
    }

    switch (edt_type) {
    case CQ_TDR_EDT_IPI:
        edt_as = &xive->ipi_as;
        break;
    case CQ_TDR_EDT_EQ:
        edt_as = &xive->end_as;
        break;
    default:
        xive_error(xive, "VC: invalid EDT type for read @%"HWADDR_PRIx, offset);
        return -1;
    }

    /* Remap the offset for the targeted address space */
    edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);

    ret = address_space_ldq(edt_as, edt_offset, MEMTXATTRS_UNSPECIFIED,
                            &result);

    if (result != MEMTX_OK) {
        xive_error(xive, "VC: %s read failed at @0x%"HWADDR_PRIx " -> @0x%"
                   HWADDR_PRIx, edt_type == CQ_TDR_EDT_IPI ? "IPI" : "END",
                   offset, edt_offset);
        return -1;
    }

    return ret;
}

static void pnv_xive_vc_write(void *opaque, hwaddr offset,
                              uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
    uint64_t edt_type = 0;
    uint64_t edt_offset;
    MemTxResult result;
    AddressSpace *edt_as = NULL;

    if (edt_index < XIVE_TABLE_EDT_MAX) {
        edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
    }

    switch (edt_type) {
    case CQ_TDR_EDT_IPI:
        edt_as = &xive->ipi_as;
        break;
    case CQ_TDR_EDT_EQ:
        edt_as = &xive->end_as;
        break;
    default:
        xive_error(xive, "VC: invalid EDT type for write @%"HWADDR_PRIx,
                   offset);
        return;
    }

    /* Remap the offset for the targeted address space */
    edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);

    address_space_stq(edt_as, edt_offset, val, MEMTXATTRS_UNSPECIFIED, &result);
    if (result != MEMTX_OK) {
        xive_error(xive, "VC: write failed at @0x%"HWADDR_PRIx, edt_offset);
    }
}

static const MemoryRegionOps pnv_xive_vc_ops = {
    .read = pnv_xive_vc_read,
    .write = pnv_xive_vc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
1723 
1724 /*
1725  * Presenter Controller MMIO region. The Virtualization Controller
1726  * updates the IPB in the NVT table when required. Not modeled.
1727  */
1728 static uint64_t pnv_xive_pc_read(void *opaque, hwaddr addr,
1729                                  unsigned size)
1730 {
1731     PnvXive *xive = PNV_XIVE(opaque);
1732 
1733     xive_error(xive, "PC: invalid read @%"HWADDR_PRIx, addr);
1734     return -1;
1735 }
1736 
1737 static void pnv_xive_pc_write(void *opaque, hwaddr addr,
1738                               uint64_t value, unsigned size)
1739 {
1740     PnvXive *xive = PNV_XIVE(opaque);
1741 
    xive_error(xive, "PC: invalid write @%"HWADDR_PRIx, addr);
}

static const MemoryRegionOps pnv_xive_pc_ops = {
    .read = pnv_xive_pc_read,
    .write = pnv_xive_pc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static void xive_nvt_pic_print_info(XiveNVT *nvt, uint32_t nvt_idx,
                                    Monitor *mon)
{
    uint8_t  eq_blk = xive_get_field32(NVT_W1_EQ_BLOCK, nvt->w1);
    uint32_t eq_idx = xive_get_field32(NVT_W1_EQ_INDEX, nvt->w1);

    if (!xive_nvt_is_valid(nvt)) {
        return;
    }

    monitor_printf(mon, "  %08x end:%02x/%04x IPB:%02x\n", nvt_idx,
                   eq_blk, eq_idx,
                   xive_get_field32(NVT_W4_IPB, nvt->w4));
}

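/*
 * Dump of the controller state for the HMP 'info pic' command.
 */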
void pnv_xive_pic_print_info(PnvXive *xive, Monitor *mon)
{
    XiveRouter *xrtr = XIVE_ROUTER(xive);
    uint8_t blk = pnv_xive_block_id(xive);
    uint8_t chip_id = xive->chip->chip_id;
    uint32_t srcno0 = XIVE_EAS(blk, 0);
    uint32_t nr_ipis = pnv_xive_nr_ipis(xive, blk);
    XiveEAS eas;
    XiveEND end;
    XiveNVT nvt;
    int i;
    uint64_t xive_nvt_per_subpage;

    monitor_printf(mon, "XIVE[%x] #%d Source %08x .. %08x\n", chip_id, blk,
                   srcno0, srcno0 + nr_ipis - 1);
    xive_source_pic_print_info(&xive->ipi_source, srcno0, mon);

    monitor_printf(mon, "XIVE[%x] #%d EAT %08x .. %08x\n", chip_id, blk,
                   srcno0, srcno0 + nr_ipis - 1);
    for (i = 0; i < nr_ipis; i++) {
        if (xive_router_get_eas(xrtr, blk, i, &eas)) {
            break;
        }
        if (!xive_eas_is_masked(&eas)) {
            xive_eas_pic_print_info(&eas, i, mon);
        }
    }

    monitor_printf(mon, "XIVE[%x] #%d ENDT\n", chip_id, blk);
    i = 0;
    while (!xive_router_get_end(xrtr, blk, i, &end)) {
        xive_end_pic_print_info(&end, i++, mon);
    }

    monitor_printf(mon, "XIVE[%x] #%d END Escalation EAT\n", chip_id, blk);
    i = 0;
    while (!xive_router_get_end(xrtr, blk, i, &end)) {
        xive_end_eas_pic_print_info(&end, i++, mon);
    }

    monitor_printf(mon, "XIVE[%x] #%d NVTT %08x .. %08x\n", chip_id, blk,
                   0, XIVE_NVT_COUNT - 1);
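    /*
     * NVT entries are scanned one subpage at a time: a failed lookup
     * means the backing subpage is not provisioned and the scan
     * resumes at the next subpage boundary.
     */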
    xive_nvt_per_subpage = pnv_xive_vst_per_subpage(xive, VST_TSEL_VPDT);
    for (i = 0; i < XIVE_NVT_COUNT; i += xive_nvt_per_subpage) {
        while (!xive_router_get_nvt(xrtr, blk, i, &nvt)) {
            xive_nvt_pic_print_info(&nvt, i++, mon);
        }
    }
}

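/*
 * Reset handler, registered with qemu_register_reset() at realize
 * time. The ESB windows are redefined by the firmware through the
 * EDT table, so any subregion still mapped is dropped here.
 */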
static void pnv_xive_reset(void *dev)
{
    PnvXive *xive = PNV_XIVE(dev);
    XiveSource *xsrc = &xive->ipi_source;
    XiveENDSource *end_xsrc = &xive->end_source;

    /* Default page size (should be changed at runtime to 64k) */
    xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;

    /* Clear subregions */
    if (memory_region_is_mapped(&xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->ipi_edt_mmio, &xsrc->esb_mmio);
    }

    if (memory_region_is_mapped(&xive->ipi_edt_mmio)) {
        memory_region_del_subregion(&xive->ipi_mmio, &xive->ipi_edt_mmio);
    }

    if (memory_region_is_mapped(&end_xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->end_edt_mmio, &end_xsrc->esb_mmio);
    }

    if (memory_region_is_mapped(&xive->end_edt_mmio)) {
        memory_region_del_subregion(&xive->end_mmio, &xive->end_edt_mmio);
    }
}

static void pnv_xive_init(Object *obj)
{
    PnvXive *xive = PNV_XIVE(obj);

    object_initialize_child(obj, "ipi_source", &xive->ipi_source,
                            TYPE_XIVE_SOURCE);
    object_initialize_child(obj, "end_source", &xive->end_source,
                            TYPE_XIVE_END_SOURCE);
}

/*
 * Maximum number of IRQs and ENDs supported by HW
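 *
 * Each ESB is backed by a pair of 64K pages (a management/EOI page
 * and a trigger page), hence the XIVE_ESB_64K_2PAGE shift used to
 * divide the VC BAR space.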
 */
#define PNV_XIVE_NR_IRQS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
#define PNV_XIVE_NR_ENDS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))

static void pnv_xive_realize(DeviceState *dev, Error **errp)
{
    PnvXive *xive = PNV_XIVE(dev);
    PnvXiveClass *pxc = PNV_XIVE_GET_CLASS(dev);
    XiveSource *xsrc = &xive->ipi_source;
    XiveENDSource *end_xsrc = &xive->end_source;
    Error *local_err = NULL;

    pxc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    assert(xive->chip);

    /*
     * The XiveSource and XiveENDSource objects are realized with the
     * maximum allowed HW configuration. The ESB MMIO regions will be
     * resized dynamically when the controller is configured by the FW
     * to limit accesses to resources not provisioned.
     */
    object_property_set_int(OBJECT(xsrc), "nr-irqs", PNV_XIVE_NR_IRQS,
                            &error_fatal);
    object_property_set_link(OBJECT(xsrc), "xive", OBJECT(xive), &error_abort);
    if (!qdev_realize(DEVICE(xsrc), NULL, errp)) {
        return;
    }

    object_property_set_int(OBJECT(end_xsrc), "nr-ends", PNV_XIVE_NR_ENDS,
                            &error_fatal);
    object_property_set_link(OBJECT(end_xsrc), "xive", OBJECT(xive),
                             &error_abort);
    if (!qdev_realize(DEVICE(end_xsrc), NULL, errp)) {
        return;
    }

    /* Default page size. Generally changed at runtime to 64k */
    xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;

    /* XSCOM region, used for initial configuration of the BARs */
    memory_region_init_io(&xive->xscom_regs, OBJECT(dev), &pnv_xive_xscom_ops,
                          xive, "xscom-xive", PNV9_XSCOM_XIVE_SIZE << 3);

    /* Interrupt controller MMIO regions */
    memory_region_init(&xive->ic_mmio, OBJECT(dev), "xive-ic",
                       PNV9_XIVE_IC_SIZE);

    memory_region_init_io(&xive->ic_reg_mmio, OBJECT(dev), &pnv_xive_ic_reg_ops,
                          xive, "xive-ic-reg", 1 << xive->ic_shift);
    memory_region_init_io(&xive->ic_notify_mmio, OBJECT(dev),
                          &pnv_xive_ic_notify_ops,
                          xive, "xive-ic-notify", 1 << xive->ic_shift);

    /* The Pervasive LSI trigger and EOI pages (not modeled) */
    memory_region_init_io(&xive->ic_lsi_mmio, OBJECT(dev), &pnv_xive_ic_lsi_ops,
                          xive, "xive-ic-lsi", 2 << xive->ic_shift);

    /* Thread Interrupt Management Area (Indirect) */
    memory_region_init_io(&xive->tm_indirect_mmio, OBJECT(dev),
                          &xive_tm_indirect_ops,
                          xive, "xive-tima-indirect", PNV9_XIVE_TM_SIZE);

    /*
     * Overall Virtualization Controller MMIO region containing the
     * IPI ESB pages and END ESB pages. The layout is defined by the
     * EDT "Domain table" and the accesses are dispatched using
     * address spaces for each.
     */
    memory_region_init_io(&xive->vc_mmio, OBJECT(xive), &pnv_xive_vc_ops, xive,
                          "xive-vc", PNV9_XIVE_VC_SIZE);

    memory_region_init(&xive->ipi_mmio, OBJECT(xive), "xive-vc-ipi",
                       PNV9_XIVE_VC_SIZE);
    address_space_init(&xive->ipi_as, &xive->ipi_mmio, "xive-vc-ipi");
    memory_region_init(&xive->end_mmio, OBJECT(xive), "xive-vc-end",
                       PNV9_XIVE_VC_SIZE);
    address_space_init(&xive->end_as, &xive->end_mmio, "xive-vc-end");

    /*
     * The MMIO windows exposing the IPI ESBs and the END ESBs in the
     * VC region. Their size is configured by the FW in the EDT table.
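     * They are created empty (size 0) here and are resized when the
     * firmware programs the EDT.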
     */
    memory_region_init(&xive->ipi_edt_mmio, OBJECT(xive), "xive-vc-ipi-edt", 0);
    memory_region_init(&xive->end_edt_mmio, OBJECT(xive), "xive-vc-end-edt", 0);

    /* Presenter Controller MMIO region (not modeled) */
    memory_region_init_io(&xive->pc_mmio, OBJECT(xive), &pnv_xive_pc_ops, xive,
                          "xive-pc", PNV9_XIVE_PC_SIZE);

    /* Thread Interrupt Management Area (Direct) */
    memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &pnv_xive_tm_ops,
                          xive, "xive-tima", PNV9_XIVE_TM_SIZE);

    qemu_register_reset(pnv_xive_reset, dev);
}

static int pnv_xive_dt_xscom(PnvXScomInterface *dev, void *fdt,
                             int xscom_offset)
{
    const char compat[] = "ibm,power9-xive-x";
    char *name;
    int offset;
    uint32_t lpc_pcba = PNV9_XSCOM_XIVE_BASE;
    uint32_t reg[] = {
        cpu_to_be32(lpc_pcba),
        cpu_to_be32(PNV9_XSCOM_XIVE_SIZE)
    };

    name = g_strdup_printf("xive@%x", lpc_pcba);
    offset = fdt_add_subnode(fdt, xscom_offset, name);
    _FDT(offset);
    g_free(name);

    _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg))));
    _FDT((fdt_setprop(fdt, offset, "compatible", compat,
                      sizeof(compat))));
    return 0;
}
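
/*
 * The node added above has the following shape, with the unit address
 * and the 'reg' values taken from PNV9_XSCOM_XIVE_BASE and
 * PNV9_XSCOM_XIVE_SIZE:
 *
 *   xive@<base> {
 *       reg = <base size>;
 *       compatible = "ibm,power9-xive-x";
 *   };
 */
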
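/*
 * The BAR properties below default to 0 and are expected to be set
 * by the PowerNV chip model before the device is realized.
 */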
static Property pnv_xive_properties[] = {
    DEFINE_PROP_UINT64("ic-bar", PnvXive, ic_base, 0),
    DEFINE_PROP_UINT64("vc-bar", PnvXive, vc_base, 0),
    DEFINE_PROP_UINT64("pc-bar", PnvXive, pc_base, 0),
    DEFINE_PROP_UINT64("tm-bar", PnvXive, tm_base, 0),
    /* The PnvChip id identifies the XIVE interrupt controller. */
    DEFINE_PROP_LINK("chip", PnvXive, chip, TYPE_PNV_CHIP, PnvChip *),
    DEFINE_PROP_END_OF_LIST(),
};

static void pnv_xive_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass);
    XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass);
    XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);
    XivePresenterClass *xpc = XIVE_PRESENTER_CLASS(klass);
    PnvXiveClass *pxc = PNV_XIVE_CLASS(klass);

    xdc->dt_xscom = pnv_xive_dt_xscom;

    dc->desc = "PowerNV XIVE Interrupt Controller";
    device_class_set_parent_realize(dc, pnv_xive_realize,
                                    &pxc->parent_realize);
    device_class_set_props(dc, pnv_xive_properties);

    xrc->get_eas = pnv_xive_get_eas;
    xrc->get_pq = pnv_xive_get_pq;
    xrc->set_pq = pnv_xive_set_pq;
    xrc->get_end = pnv_xive_get_end;
    xrc->write_end = pnv_xive_write_end;
    xrc->get_nvt = pnv_xive_get_nvt;
    xrc->write_nvt = pnv_xive_write_nvt;
    xrc->get_block_id = pnv_xive_get_block_id;

    xnc->notify = pnv_xive_notify;
    xpc->match_nvt = pnv_xive_match_nvt;
    xpc->get_config = pnv_xive_presenter_get_config;
}

static const TypeInfo pnv_xive_info = {
    .name          = TYPE_PNV_XIVE,
    .parent        = TYPE_XIVE_ROUTER,
    .instance_init = pnv_xive_init,
    .instance_size = sizeof(PnvXive),
    .class_init    = pnv_xive_class_init,
    .class_size    = sizeof(PnvXiveClass),
    .interfaces    = (InterfaceInfo[]) {
        { TYPE_PNV_XSCOM_INTERFACE },
        { }
    }
};

static void pnv_xive_register_types(void)
{
    type_register_static(&pnv_xive_info);
}

type_init(pnv_xive_register_types)