/*
 * QEMU PowerPC XIVE interrupt controller model
 *
 * Copyright (c) 2017-2019, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/dma.h"
#include "sysemu/reset.h"
#include "monitor/monitor.h"
#include "hw/ppc/fdt.h"
#include "hw/ppc/pnv.h"
#include "hw/ppc/pnv_core.h"
#include "hw/ppc/pnv_xscom.h"
#include "hw/ppc/pnv_xive.h"
#include "hw/ppc/xive_regs.h"
#include "hw/qdev-properties.h"
#include "hw/ppc/ppc.h"

#include <libfdt.h>

#include "pnv_xive_regs.h"

#undef XIVE_DEBUG

/*
 * Virtual structures table (VST)
 */
#define SBE_PER_BYTE   4

typedef struct XiveVstInfo {
    const char *name;
    uint32_t    size;
    uint32_t    max_blocks;
} XiveVstInfo;

static const XiveVstInfo vst_infos[] = {
    [VST_TSEL_IVT]  = { "EAT",  sizeof(XiveEAS), 16 },
    [VST_TSEL_SBE]  = { "SBE",  1,               16 },
    [VST_TSEL_EQDT] = { "ENDT", sizeof(XiveEND), 16 },
    [VST_TSEL_VPDT] = { "VPDT", sizeof(XiveNVT), 32 },

    /*
     * Interrupt FIFO backing store table (not modeled):
     *
     * 0 - IPI,
     * 1 - HWD,
     * 2 - First escalate,
     * 3 - Second escalate,
     * 4 - Redistribution,
     * 5 - IPI cascaded queue ?
     */
    [VST_TSEL_IRQ]  = { "IRQ",  1,               6  },
};

#define xive_error(xive, fmt, ...)                                      \
    qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n",              \
                  (xive)->chip->chip_id, ## __VA_ARGS__);

/*
 * QEMU version of the GETFIELD/SETFIELD macros
 *
 * TODO: It might be better to use the existing extract64() and
 * deposit64() but this means that all the register definitions will
 * change and become incompatible with the ones found in skiboot.
 *
 * Keep it as it is for now until we find a common ground.
 */
static inline uint64_t GETFIELD(uint64_t mask, uint64_t word)
{
    return (word & mask) >> ctz64(mask);
}

static inline uint64_t SETFIELD(uint64_t mask, uint64_t word,
                                uint64_t value)
{
    return (word & ~mask) | ((value << ctz64(mask)) & mask);
}

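/*
 * Worked example (illustrative, not part of the original model):
 * with a mask such as PPC_BITMASK(40, 47), GETFIELD() shifts the
 * masked bits down by ctz64(mask) and SETFIELD() is its inverse, so
 * a round-trip preserves the other bits of the word:
 *
 *     uint64_t mask = PPC_BITMASK(40, 47);     // IBM bit numbering
 *     uint64_t word = SETFIELD(mask, 0, 0xab); // i.e. 0xab << 16
 *     assert(GETFIELD(mask, word) == 0xab);
 */
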
/*
 * When PC_TCTXT_CHIPID_OVERRIDE is configured, the PC_TCTXT_CHIPID
 * field overrides the hardwired chip ID in the Powerbus operations
 * and for CAM compares
 */
static uint8_t pnv_xive_block_id(PnvXive *xive)
{
    uint8_t blk = xive->chip->chip_id;
    uint64_t cfg_val = xive->regs[PC_TCTXT_CFG >> 3];

    if (cfg_val & PC_TCTXT_CHIPID_OVERRIDE) {
        blk = GETFIELD(PC_TCTXT_CHIPID, cfg_val);
    }

    return blk;
}
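
/*
 * Illustrative sketch (not from the original file): firmware can
 * relocate this controller to block 0x10, whatever the chip id, by
 * setting the override configuration, after which
 * pnv_xive_block_id() returns 0x10:
 *
 *     uint64_t cfg = xive->regs[PC_TCTXT_CFG >> 3];
 *     cfg = SETFIELD(PC_TCTXT_CHIPID, cfg, 0x10) | PC_TCTXT_CHIPID_OVERRIDE;
 *     xive->regs[PC_TCTXT_CFG >> 3] = cfg;
 */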

/*
 * Remote access to controllers. HW uses MMIOs. For now, a simple scan
 * of the chips is good enough.
 *
 * TODO: Block scope support
 */
static PnvXive *pnv_xive_get_remote(uint8_t blk)
{
    PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine());
    int i;

    for (i = 0; i < pnv->num_chips; i++) {
        Pnv9Chip *chip9 = PNV9_CHIP(pnv->chips[i]);
        PnvXive *xive = &chip9->xive;

        if (pnv_xive_block_id(xive) == blk) {
            return xive;
        }
    }
    return NULL;
}

/*
 * VST accessors for SBE, EAT, ENDT, NVT
 *
 * Indirect VST tables are arrays of VSDs pointing to a page (of the
 * same size). Each page is a direct VST table.
 */

#define XIVE_VSD_SIZE 8

/* Indirect page size can be 4K, 64K, 2M, 16M. */
static uint64_t pnv_xive_vst_page_size_allowed(uint32_t page_shift)
{
    return page_shift == 12 || page_shift == 16 ||
           page_shift == 21 || page_shift == 24;
}

static uint64_t pnv_xive_vst_addr_direct(PnvXive *xive, uint32_t type,
                                         uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);
    uint32_t idx_max;

    idx_max = vst_tsize / info->size - 1;
    if (idx > idx_max) {
#ifdef XIVE_DEBUG
        xive_error(xive, "VST: %s entry %x out of range [ 0 .. %x ] !?",
                   info->name, idx, idx_max);
#endif
        return 0;
    }

    return vst_addr + idx * info->size;
}

static uint64_t pnv_xive_vst_addr_indirect(PnvXive *xive, uint32_t type,
                                           uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t vsd_idx;
    uint32_t page_shift;
    uint32_t vst_per_page;

    /* Get the page size of the indirect table. */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    vsd = ldq_be_dma(&address_space_memory, vsd_addr);

    if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE_DEBUG
        xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
#endif
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive_vst_page_size_allowed(page_shift)) {
        xive_error(xive, "VST: invalid %s page shift %d", info->name,
                   page_shift);
        return 0;
    }

    vst_per_page = (1ull << page_shift) / info->size;
    vsd_idx = idx / vst_per_page;

    /* Load the VSD we are looking for, if not already done */
    if (vsd_idx) {
        vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE;
        vsd = ldq_be_dma(&address_space_memory, vsd_addr);

        if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE_DEBUG
            xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
#endif
            return 0;
        }

        /*
         * Check that the pages have a consistent size across the
         * indirect table
         */
        if (page_shift != GETFIELD(VSD_TSIZE, vsd) + 12) {
            xive_error(xive, "VST: %s entry %x indirect page size differs !?",
                       info->name, idx);
            return 0;
        }
    }

    return pnv_xive_vst_addr_direct(xive, type, vsd, (idx % vst_per_page));
}
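
/*
 * Worked example of the indirect lookup above (numbers are
 * illustrative): with 64K indirect pages (page_shift = 16) and
 * 32-byte END entries, vst_per_page is 65536 / 32 = 2048. A lookup
 * of idx 5000 reads the VSD at vsd_addr + 2 * XIVE_VSD_SIZE
 * (vsd_idx = 5000 / 2048 = 2) and then resolves entry
 * 5000 % 2048 = 904 in the direct page that this VSD points to.
 */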

static uint64_t pnv_xive_vst_addr(PnvXive *xive, uint32_t type, uint8_t blk,
                                  uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd;

    if (blk >= info->max_blocks) {
        xive_error(xive, "VST: invalid block id %d for VST %s %d !?",
                   blk, info->name, idx);
        return 0;
    }

    vsd = xive->vsds[type][blk];

    /* Remote VST access */
    if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) {
        xive = pnv_xive_get_remote(blk);

        return xive ? pnv_xive_vst_addr(xive, type, blk, idx) : 0;
    }

    if (VSD_INDIRECT & vsd) {
        return pnv_xive_vst_addr_indirect(xive, type, vsd, idx);
    }

    return pnv_xive_vst_addr_direct(xive, type, vsd, idx);
}

static int pnv_xive_vst_read(PnvXive *xive, uint32_t type, uint8_t blk,
                             uint32_t idx, void *data)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx);

    if (!addr) {
        return -1;
    }

    cpu_physical_memory_read(addr, data, info->size);
    return 0;
}

#define XIVE_VST_WORD_ALL -1

static int pnv_xive_vst_write(PnvXive *xive, uint32_t type, uint8_t blk,
                              uint32_t idx, void *data, uint32_t word_number)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx);

    if (!addr) {
        return -1;
    }

    if (word_number == XIVE_VST_WORD_ALL) {
        cpu_physical_memory_write(addr, data, info->size);
    } else {
        cpu_physical_memory_write(addr + word_number * 4,
                                  data + word_number * 4, 4);
    }
    return 0;
}

static int pnv_xive_get_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveEND *end)
{
    return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end);
}

static int pnv_xive_write_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                              XiveEND *end, uint8_t word_number)
{
    return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end,
                              word_number);
}

static int pnv_xive_end_update(PnvXive *xive)
{
    uint8_t  blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
                           xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
                           xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    int i;
    uint64_t eqc_watch[4];

    for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) {
        eqc_watch[i] = cpu_to_be64(xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i]);
    }

    return pnv_xive_vst_write(xive, VST_TSEL_EQDT, blk, idx, eqc_watch,
                              XIVE_VST_WORD_ALL);
}

static void pnv_xive_end_cache_load(PnvXive *xive)
{
    uint8_t  blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
                           xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
                           xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint64_t eqc_watch[4] = { 0 };
    int i;

    if (pnv_xive_vst_read(xive, VST_TSEL_EQDT, blk, idx, eqc_watch)) {
        xive_error(xive, "VST: no END entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) {
        xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(eqc_watch[i]);
    }
}
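
/*
 * A minimal sketch of the firmware sequence modeled by the two
 * cache-watch helpers above (register values and the dat0 variable
 * are illustrative):
 *
 *     // select END 0/31 in the EQ cache watch engine
 *     pnv_xive_ic_reg_write(xive, VC_EQC_CWATCH_SPEC,
 *                           SETFIELD(VC_EQC_CWATCH_BLOCKID, 0, 0) |
 *                           SETFIELD(VC_EQC_CWATCH_OFFSET, 0, 31), 8);
 *     // reading DAT0 loads DAT0-DAT3 from the END entry ...
 *     pnv_xive_ic_reg_read(xive, VC_EQC_CWATCH_DAT0, 8);
 *     // ... and writing DAT0 pushes DAT0-DAT3 back out to the VST
 *     pnv_xive_ic_reg_write(xive, VC_EQC_CWATCH_DAT0, dat0, 8);
 */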

static int pnv_xive_get_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveNVT *nvt)
{
    return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt);
}

static int pnv_xive_write_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                              XiveNVT *nvt, uint8_t word_number)
{
    return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt,
                              word_number);
}

static int pnv_xive_nvt_update(PnvXive *xive)
{
    uint8_t  blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
                           xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
                           xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    int i;
    uint64_t vpc_watch[8];

    for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) {
        vpc_watch[i] = cpu_to_be64(xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i]);
    }

    return pnv_xive_vst_write(xive, VST_TSEL_VPDT, blk, idx, vpc_watch,
                              XIVE_VST_WORD_ALL);
}

static void pnv_xive_nvt_cache_load(PnvXive *xive)
{
    uint8_t  blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
                           xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
                           xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint64_t vpc_watch[8] = { 0 };
    int i;

    if (pnv_xive_vst_read(xive, VST_TSEL_VPDT, blk, idx, vpc_watch)) {
        xive_error(xive, "VST: no NVT entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) {
        xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(vpc_watch[i]);
    }
}

static int pnv_xive_get_eas(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveEAS *eas)
{
    PnvXive *xive = PNV_XIVE(xrtr);

    /*
     * EAT lookups should be local to the IC
     */
    if (pnv_xive_block_id(xive) != blk) {
        xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    return pnv_xive_vst_read(xive, VST_TSEL_IVT, blk, idx, eas);
}

/*
 * One bit per thread id. The first register PC_THREAD_EN_REG0 covers
 * cores 0-15 of the chip (normal) or 0-7 (fused). The second
 * register covers cores 16-23 (normal) or 8-11 (fused).
 */
static bool pnv_xive_is_cpu_enabled(PnvXive *xive, PowerPCCPU *cpu)
{
    int pir = ppc_cpu_pir(cpu);
    uint32_t fc = PNV9_PIR2FUSEDCORE(pir);
    uint64_t reg = fc < 8 ? PC_THREAD_EN_REG0 : PC_THREAD_EN_REG1;
    uint32_t bit = pir & 0x3f;

    return xive->regs[reg >> 3] & PPC_BIT(bit);
}
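
/*
 * For example, a thread with fused core id 9 is controlled by
 * PC_THREAD_EN_REG1 and its enable bit is PPC_BIT(pir & 0x3f):
 * firmware enables it with a store of that bit to
 * PC_THREAD_EN_REG1_SET and disables it with the same store to
 * PC_THREAD_EN_REG1_CLR.
 */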

static int pnv_xive_match_nvt(XivePresenter *xptr, uint8_t format,
                              uint8_t nvt_blk, uint32_t nvt_idx,
                              bool cam_ignore, uint8_t priority,
                              uint32_t logic_serv, XiveTCTXMatch *match)
{
    PnvXive *xive = PNV_XIVE(xptr);
    PnvChip *chip = xive->chip;
    int count = 0;
    int i, j;

    for (i = 0; i < chip->nr_cores; i++) {
        PnvCore *pc = chip->cores[i];
        CPUCore *cc = CPU_CORE(pc);

        for (j = 0; j < cc->nr_threads; j++) {
            PowerPCCPU *cpu = pc->threads[j];
            XiveTCTX *tctx;
            int ring;

            if (!pnv_xive_is_cpu_enabled(xive, cpu)) {
                continue;
            }

            tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

            /*
             * Check the thread context CAM lines and record matches.
             */
            ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk,
                                             nvt_idx, cam_ignore, logic_serv);
            /*
             * Save the context and follow on to catch duplicates,
             * which we don't support yet.
             */
            if (ring != -1) {
                if (match->tctx) {
                    qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a "
                                  "thread context NVT %x/%x\n",
                                  nvt_blk, nvt_idx);
                    return -1;
                }

                match->ring = ring;
                match->tctx = tctx;
                count++;
            }
        }
    }

    return count;
}

/*
 * The TIMA MMIO space is shared among the chips and to identify the
 * chip from which the access is being done, we extract the chip id
 * from the PIR.
 */
static PnvXive *pnv_xive_tm_get_xive(PowerPCCPU *cpu)
{
    int pir = ppc_cpu_pir(cpu);
    PnvChip *chip;
    PnvXive *xive;

    chip = pnv_get_chip(PNV9_PIR2CHIP(pir));
    assert(chip);
    xive = &PNV9_CHIP(chip)->xive;

    if (!pnv_xive_is_cpu_enabled(xive, cpu)) {
        xive_error(xive, "IC: CPU %x is not enabled", pir);
    }
    return xive;
}

/*
 * The internal sources (IPIs) of the interrupt controller have no
 * knowledge of the XIVE chip on which they reside. Encode the block
 * id in the source interrupt number before forwarding the source
 * event notification to the Router. This is required on a multichip
 * system.
 */
static void pnv_xive_notify(XiveNotifier *xn, uint32_t srcno)
{
    PnvXive *xive = PNV_XIVE(xn);
    uint8_t blk = pnv_xive_block_id(xive);

    xive_router_notify(xn, XIVE_EAS(blk, srcno));
}
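
/*
 * Illustrative example, assuming the 4-bit block / 28-bit index
 * split of XIVE_EAS(): IPI source number 0x20 on a controller with
 * block id 0x1 is presented to the router as 0x10000020, from which
 * XIVE_EAS_BLOCK() and XIVE_EAS_INDEX() recover 0x1 and 0x20.
 */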

/*
 * XIVE helpers
 */

static uint64_t pnv_xive_vc_size(PnvXive *xive)
{
    return (~xive->regs[CQ_VC_BARM >> 3] + 1) & CQ_VC_BARM_MASK;
}

static uint64_t pnv_xive_edt_shift(PnvXive *xive)
{
    return ctz64(pnv_xive_vc_size(xive) / XIVE_TABLE_EDT_MAX);
}

static uint64_t pnv_xive_pc_size(PnvXive *xive)
{
    return (~xive->regs[CQ_PC_BARM >> 3] + 1) & CQ_PC_BARM_MASK;
}

static uint32_t pnv_xive_nr_ipis(PnvXive *xive, uint8_t blk)
{
    uint64_t vsd = xive->vsds[VST_TSEL_SBE][blk];
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);

    return VSD_INDIRECT & vsd ? 0 : vst_tsize * SBE_PER_BYTE;
}
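
/*
 * Example of the sizing rule above: each byte of the SBE backing
 * store holds the PQ bits of SBE_PER_BYTE (4) sources, so a direct
 * 64K table (VSD_TSIZE = 4, i.e. 1 << (4 + 12) bytes) provisions
 * 65536 * 4 = 262144 IPI numbers for the block.
 */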

/*
 * EDT Table
 *
 * The Virtualization Controller MMIO region containing the IPI ESB
 * pages and END ESB pages is sub-divided into "sets" which map
 * portions of the VC region to the different ESB pages. It is
 * configured at runtime through the EDT "Domain Table" to let the
 * firmware decide how to split the VC address space between IPI ESB
 * pages and END ESB pages.
 */

/*
 * Computes the overall size of the IPI or the END ESB pages
 */
static uint64_t pnv_xive_edt_size(PnvXive *xive, uint64_t type)
{
    uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive);
    uint64_t size = 0;
    int i;

    for (i = 0; i < XIVE_TABLE_EDT_MAX; i++) {
        uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]);

        if (edt_type == type) {
            size += edt_size;
        }
    }

    return size;
}

/*
 * Maps an offset of the VC region into the IPI or END region using
 * the layout defined by the EDT "Domain Table"
 */
static uint64_t pnv_xive_edt_offset(PnvXive *xive, uint64_t vc_offset,
                                    uint64_t type)
{
    int i;
    uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive);
    uint64_t edt_offset = vc_offset;

    for (i = 0; i < XIVE_TABLE_EDT_MAX && (i * edt_size) < vc_offset; i++) {
        uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]);

        if (edt_type != type) {
            edt_offset -= edt_size;
        }
    }

    return edt_offset;
}
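
/*
 * Worked example of the remapping above (layout is illustrative):
 * assume EDT sets 0 and 1 are of type CQ_TDR_EDT_IPI and set 2 is
 * CQ_TDR_EDT_EQ. For a VC access falling in set 2, the loop
 * subtracts edt_size twice (sets 0 and 1 are of the other type), so
 * the END address space is accessed at vc_offset - 2 * edt_size,
 * i.e. relative to the start of the END region.
 */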

static void pnv_xive_edt_resize(PnvXive *xive)
{
    uint64_t ipi_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_IPI);
    uint64_t end_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_EQ);

    memory_region_set_size(&xive->ipi_edt_mmio, ipi_edt_size);
    memory_region_add_subregion(&xive->ipi_mmio, 0, &xive->ipi_edt_mmio);

    memory_region_set_size(&xive->end_edt_mmio, end_edt_size);
    memory_region_add_subregion(&xive->end_mmio, 0, &xive->end_edt_mmio);
}

/*
 * XIVE Table configuration. Only EDT is supported.
 */
static int pnv_xive_table_set_data(PnvXive *xive, uint64_t val)
{
    uint64_t tsel = xive->regs[CQ_TAR >> 3] & CQ_TAR_TSEL;
    uint8_t tsel_index = GETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3]);
    uint64_t *xive_table;
    uint8_t max_index;

    switch (tsel) {
    case CQ_TAR_TSEL_BLK:
        max_index = ARRAY_SIZE(xive->blk);
        xive_table = xive->blk;
        break;
    case CQ_TAR_TSEL_MIG:
        max_index = ARRAY_SIZE(xive->mig);
        xive_table = xive->mig;
        break;
    case CQ_TAR_TSEL_EDT:
        max_index = ARRAY_SIZE(xive->edt);
        xive_table = xive->edt;
        break;
    case CQ_TAR_TSEL_VDT:
        max_index = ARRAY_SIZE(xive->vdt);
        xive_table = xive->vdt;
        break;
    default:
        xive_error(xive, "IC: invalid table %d", (int) tsel);
        return -1;
    }

    if (tsel_index >= max_index) {
        xive_error(xive, "IC: invalid index %d", (int) tsel_index);
        return -1;
    }

    xive_table[tsel_index] = val;

    if (xive->regs[CQ_TAR >> 3] & CQ_TAR_TBL_AUTOINC) {
        xive->regs[CQ_TAR >> 3] =
            SETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3], ++tsel_index);
    }

    /*
     * EDT configuration is complete. Resize the MMIO windows exposing
     * the IPI and the END ESBs in the VC region.
     */
    if (tsel == CQ_TAR_TSEL_EDT && tsel_index == ARRAY_SIZE(xive->edt)) {
        pnv_xive_edt_resize(xive);
    }

    return 0;
}

/*
 * Virtual Structure Tables (VST) configuration
 */
static void pnv_xive_vst_set_exclusive(PnvXive *xive, uint8_t type,
                                       uint8_t blk, uint64_t vsd)
{
    XiveENDSource *end_xsrc = &xive->end_source;
    XiveSource *xsrc = &xive->ipi_source;
    const XiveVstInfo *info = &vst_infos[type];
    uint32_t page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
    uint64_t vst_tsize = 1ull << page_shift;
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    /* Basic checks */

    if (VSD_INDIRECT & vsd) {
        if (!(xive->regs[VC_GLOBAL_CONFIG >> 3] & VC_GCONF_INDIRECT)) {
            xive_error(xive, "VST: %s indirect tables are not enabled",
                       info->name);
            return;
        }

        if (!pnv_xive_vst_page_size_allowed(page_shift)) {
            xive_error(xive, "VST: invalid %s page shift %d", info->name,
                       page_shift);
            return;
        }
    }

    if (!QEMU_IS_ALIGNED(vst_addr, 1ull << page_shift)) {
        xive_error(xive, "VST: %s table address 0x%"PRIx64" is not aligned with"
                   " page shift %d", info->name, vst_addr, page_shift);
        return;
    }

    /* Record the table configuration (in SRAM on HW) */
    xive->vsds[type][blk] = vsd;

    /* Now tune the models with the configuration provided by the FW */

    switch (type) {
    case VST_TSEL_IVT:  /* Nothing to be done */
        break;

    case VST_TSEL_EQDT:
        /*
         * Backing store pages for the END.
         *
         * If the table is direct, we can compute the number of PQ
         * entries provisioned by FW (such as skiboot) and resize the
         * END ESB window accordingly.
         */
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&end_xsrc->esb_mmio, (vst_tsize / info->size)
                                   * (1ull << xsrc->esb_shift));
        }
        memory_region_add_subregion(&xive->end_edt_mmio, 0,
                                    &end_xsrc->esb_mmio);
        break;

    case VST_TSEL_SBE:
        /*
         * Backing store pages for the source PQ bits. The model does
         * not use these PQ bits backed in RAM because the XiveSource
         * model has its own.
         *
         * If the table is direct, we can compute the number of PQ
         * entries provisioned by FW (such as skiboot) and resize the
         * ESB window accordingly.
         */
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&xsrc->esb_mmio, vst_tsize * SBE_PER_BYTE
                                   * (1ull << xsrc->esb_shift));
        }
        memory_region_add_subregion(&xive->ipi_edt_mmio, 0, &xsrc->esb_mmio);
        break;

    case VST_TSEL_VPDT: /* Not modeled */
    case VST_TSEL_IRQ:  /* Not modeled */
        /*
         * These tables contain the backing store pages for the
         * interrupt FIFOs of the VC sub-engine in case of overflow.
         */
        break;

    default:
        g_assert_not_reached();
    }
}

/*
 * Both the PC and VC sub-engines are configured, as each one uses
 * the Virtual Structure Tables: SBE, EAS, END and NVT.
 */
static void pnv_xive_vst_set_data(PnvXive *xive, uint64_t vsd, bool pc_engine)
{
    uint8_t mode = GETFIELD(VSD_MODE, vsd);
    uint8_t type = GETFIELD(VST_TABLE_SELECT,
                            xive->regs[VC_VSD_TABLE_ADDR >> 3]);
    uint8_t blk = GETFIELD(VST_TABLE_BLOCK,
                           xive->regs[VC_VSD_TABLE_ADDR >> 3]);
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    if (type > VST_TSEL_IRQ) {
        xive_error(xive, "VST: invalid table type %d", type);
        return;
    }

    if (blk >= vst_infos[type].max_blocks) {
        xive_error(xive, "VST: invalid block id %d for"
                      " %s table", blk, vst_infos[type].name);
        return;
    }

    /*
     * Only take the VC sub-engine configuration into account because
     * the XiveRouter model combines both VC and PC sub-engines
     */
    if (pc_engine) {
        return;
    }

    if (!vst_addr) {
        xive_error(xive, "VST: invalid %s table address", vst_infos[type].name);
        return;
    }

    switch (mode) {
    case VSD_MODE_FORWARD:
        xive->vsds[type][blk] = vsd;
        break;

    case VSD_MODE_EXCLUSIVE:
        pnv_xive_vst_set_exclusive(xive, type, blk, vsd);
        break;

    default:
        xive_error(xive, "VST: unsupported table mode %d", mode);
        return;
    }
}

/*
 * Interrupt controller MMIO region. The layout is compatible between
 * 4K and 64K pages:
 *
 * Page 0           sub-engine BARs
 *  0x000 - 0x3FF   IC registers
 *  0x400 - 0x7FF   PC registers
 *  0x800 - 0xFFF   VC registers
 *
 * Page 1           Notify page (writes only)
 *  0x000 - 0x7FF   HW interrupt triggers (PSI, PHB)
 *  0x800 - 0xFFF   forwards and syncs
 *
 * Page 2           LSI Trigger page (writes only) (not modeled)
 * Page 3           LSI SB EOI page (reads only) (not modeled)
 *
 * Page 4-7         indirect TIMA
 */

/*
 * IC - registers MMIO
 */
static void pnv_xive_ic_reg_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    MemoryRegion *sysmem = get_system_memory();
    uint32_t reg = offset >> 3;
    bool is_chip0 = xive->chip->chip_id == 0;

    switch (offset) {

    /*
     * XIVE CQ (PowerBus bridge) settings
     */
    case CQ_MSGSND:     /* msgsnd for doorbells */
    case CQ_FIRMASK_OR: /* FIR error reporting */
        break;
    case CQ_PBI_CTL:
        if (val & CQ_PBI_PC_64K) {
            xive->pc_shift = 16;
        }
        if (val & CQ_PBI_VC_64K) {
            xive->vc_shift = 16;
        }
        break;
    case CQ_CFG_PB_GEN: /* PowerBus General Configuration */
        /*
         * TODO: CQ_INT_ADDR_OPT for 1-block-per-chip mode
         */
        break;

    /*
     * XIVE Virtualization Controller settings
     */
    case VC_GLOBAL_CONFIG:
        break;

    /*
     * XIVE Presenter Controller settings
     */
    case PC_GLOBAL_CONFIG:
        /*
         * PC_GCONF_CHIPID_OVR
         *   Overrides Int command Chip ID with the Chip ID field (DEBUG)
         */
        break;
    case PC_TCTXT_CFG:
        /*
         * TODO: block group support
         */
        break;
    case PC_TCTXT_TRACK:
        /*
         * PC_TCTXT_TRACK_EN:
         *   enable block tracking and exchange of block ownership
         *   information between Interrupt controllers
         */
        break;

    /*
     * Misc settings
     */
    case VC_SBC_CONFIG: /* Store EOI configuration */
        /*
         * Configure store EOI if required by firmware (skiboot has
         * removed support recently though)
         */
        if (val & (VC_SBC_CONF_CPLX_CIST | VC_SBC_CONF_CIST_BOTH)) {
            xive->ipi_source.esb_flags |= XIVE_SRC_STORE_EOI;
        }
        break;

    case VC_EQC_CONFIG: /* TODO: silent escalation */
    case VC_AIB_TX_ORDER_TAG2: /* relax ordering */
        break;

    /*
     * XIVE BAR settings (XSCOM only)
     */
    case CQ_RST_CTL:
        /* bit4: resets all BAR registers */
        break;

    case CQ_IC_BAR: /* IC BAR. 8 pages */
        xive->ic_shift = val & CQ_IC_BAR_64K ? 16 : 12;
        if (!(val & CQ_IC_BAR_VALID)) {
            xive->ic_base = 0;
            if (xive->regs[reg] & CQ_IC_BAR_VALID) {
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_reg_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_notify_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_lsi_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->tm_indirect_mmio);

                memory_region_del_subregion(sysmem, &xive->ic_mmio);
            }
        } else {
            xive->ic_base = val & ~(CQ_IC_BAR_VALID | CQ_IC_BAR_64K);
            if (!(xive->regs[reg] & CQ_IC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->ic_base,
                                            &xive->ic_mmio);

                memory_region_add_subregion(&xive->ic_mmio, 0,
                                            &xive->ic_reg_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            1ul << xive->ic_shift,
                                            &xive->ic_notify_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            2ul << xive->ic_shift,
                                            &xive->ic_lsi_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            4ull << xive->ic_shift,
                                            &xive->tm_indirect_mmio);
            }
        }
        break;

    case CQ_TM1_BAR: /* TM BAR. 4 pages. Map only once */
    case CQ_TM2_BAR: /* second TM BAR. for hotplug. Not modeled */
        xive->tm_shift = val & CQ_TM_BAR_64K ? 16 : 12;
        if (!(val & CQ_TM_BAR_VALID)) {
            xive->tm_base = 0;
            if (xive->regs[reg] & CQ_TM_BAR_VALID && is_chip0) {
                memory_region_del_subregion(sysmem, &xive->tm_mmio);
            }
        } else {
            xive->tm_base = val & ~(CQ_TM_BAR_VALID | CQ_TM_BAR_64K);
            if (!(xive->regs[reg] & CQ_TM_BAR_VALID) && is_chip0) {
                memory_region_add_subregion(sysmem, xive->tm_base,
                                            &xive->tm_mmio);
            }
        }
        break;

    case CQ_PC_BARM:
        xive->regs[reg] = val;
        memory_region_set_size(&xive->pc_mmio, pnv_xive_pc_size(xive));
        break;
    case CQ_PC_BAR: /* From 32M to 512G */
        if (!(val & CQ_PC_BAR_VALID)) {
            xive->pc_base = 0;
            if (xive->regs[reg] & CQ_PC_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->pc_mmio);
            }
        } else {
            xive->pc_base = val & ~(CQ_PC_BAR_VALID);
            if (!(xive->regs[reg] & CQ_PC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->pc_base,
                                            &xive->pc_mmio);
            }
        }
        break;

    case CQ_VC_BARM:
        xive->regs[reg] = val;
        memory_region_set_size(&xive->vc_mmio, pnv_xive_vc_size(xive));
        break;
    case CQ_VC_BAR: /* From 64M to 4TB */
        if (!(val & CQ_VC_BAR_VALID)) {
            xive->vc_base = 0;
            if (xive->regs[reg] & CQ_VC_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->vc_mmio);
            }
        } else {
            xive->vc_base = val & ~(CQ_VC_BAR_VALID);
            if (!(xive->regs[reg] & CQ_VC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->vc_base,
                                            &xive->vc_mmio);
            }
        }
        break;

    /*
     * XIVE Table settings.
     */
    case CQ_TAR: /* Table Address */
        break;
    case CQ_TDR: /* Table Data */
        pnv_xive_table_set_data(xive, val);
        break;

    /*
     * XIVE VC & PC Virtual Structure Table settings
     */
    case VC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_ADDR: /* Virtual table selector */
        break;
    case VC_VSD_TABLE_DATA: /* Virtual table setting */
    case PC_VSD_TABLE_DATA:
        pnv_xive_vst_set_data(xive, val, offset == PC_VSD_TABLE_DATA);
        break;

    /*
     * Interrupt FIFO overflow in memory backing store (Not modeled)
     */
    case VC_IRQ_CONFIG_IPI:
    case VC_IRQ_CONFIG_HW:
    case VC_IRQ_CONFIG_CASCADE1:
    case VC_IRQ_CONFIG_CASCADE2:
    case VC_IRQ_CONFIG_REDIST:
    case VC_IRQ_CONFIG_IPI_CASC:
        break;

    /*
     * XIVE hardware thread enablement
     */
    case PC_THREAD_EN_REG0: /* Physical Thread Enable */
    case PC_THREAD_EN_REG1: /* Physical Thread Enable (fused core) */
        break;

    case PC_THREAD_EN_REG0_SET:
        xive->regs[PC_THREAD_EN_REG0 >> 3] |= val;
        break;
    case PC_THREAD_EN_REG1_SET:
        xive->regs[PC_THREAD_EN_REG1 >> 3] |= val;
        break;
    case PC_THREAD_EN_REG0_CLR:
        xive->regs[PC_THREAD_EN_REG0 >> 3] &= ~val;
        break;
    case PC_THREAD_EN_REG1_CLR:
        xive->regs[PC_THREAD_EN_REG1 >> 3] &= ~val;
        break;

    /*
     * Indirect TIMA access set up. Defines the PIR of the HW thread
     * to use.
     */
    case PC_TCTXT_INDIR0 ... PC_TCTXT_INDIR3:
        break;

    /*
     * XIVE PC & VC cache updates for EAS, NVT and END
     */
    case VC_IVC_SCRUB_MASK:
    case VC_IVC_SCRUB_TRIG:
        break;

    case VC_EQC_CWATCH_SPEC:
        val &= ~VC_EQC_CWATCH_CONFLICT; /* HW resets this bit */
        break;
    case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
        break;
    case VC_EQC_CWATCH_DAT0:
        /* writing to DATA0 triggers the cache write */
        xive->regs[reg] = val;
        pnv_xive_end_update(xive);
        break;
    case VC_EQC_SCRUB_MASK:
    case VC_EQC_SCRUB_TRIG:
        /*
         * The scrubbing registers flush the cache in RAM and can also
         * invalidate.
         */
        break;

    case PC_VPC_CWATCH_SPEC:
        val &= ~PC_VPC_CWATCH_CONFLICT; /* HW resets this bit */
        break;
    case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
        break;
    case PC_VPC_CWATCH_DAT0:
        /* writing to DATA0 triggers the cache write */
        xive->regs[reg] = val;
        pnv_xive_nvt_update(xive);
        break;
    case PC_VPC_SCRUB_MASK:
    case PC_VPC_SCRUB_TRIG:
        /*
         * The scrubbing registers flush the cache in RAM and can also
         * invalidate.
         */
        break;

    /*
     * XIVE PC & VC cache invalidation
     */
    case PC_AT_KILL:
        break;
    case VC_AT_MACRO_KILL:
        break;
    case PC_AT_KILL_MASK:
    case VC_AT_MACRO_KILL_MASK:
        break;

    default:
        xive_error(xive, "IC: invalid write to reg=0x%"HWADDR_PRIx, offset);
        return;
    }

    xive->regs[reg] = val;
}

static uint64_t pnv_xive_ic_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t val = 0;
    uint32_t reg = offset >> 3;

    switch (offset) {
    case CQ_CFG_PB_GEN:
    case CQ_IC_BAR:
    case CQ_TM1_BAR:
    case CQ_TM2_BAR:
    case CQ_PC_BAR:
    case CQ_PC_BARM:
    case CQ_VC_BAR:
    case CQ_VC_BARM:
    case CQ_TAR:
    case CQ_TDR:
    case CQ_PBI_CTL:

    case PC_TCTXT_CFG:
    case PC_TCTXT_TRACK:
    case PC_TCTXT_INDIR0:
    case PC_TCTXT_INDIR1:
    case PC_TCTXT_INDIR2:
    case PC_TCTXT_INDIR3:
    case PC_GLOBAL_CONFIG:

    case PC_VPC_SCRUB_MASK:

    case VC_GLOBAL_CONFIG:
    case VC_AIB_TX_ORDER_TAG2:

    case VC_IRQ_CONFIG_IPI:
    case VC_IRQ_CONFIG_HW:
    case VC_IRQ_CONFIG_CASCADE1:
    case VC_IRQ_CONFIG_CASCADE2:
    case VC_IRQ_CONFIG_REDIST:
    case VC_IRQ_CONFIG_IPI_CASC:

    case VC_EQC_SCRUB_MASK:
    case VC_IVC_SCRUB_MASK:
    case VC_SBC_CONFIG:
    case VC_AT_MACRO_KILL_MASK:
    case VC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_ADDR:
    case VC_VSD_TABLE_DATA:
    case PC_VSD_TABLE_DATA:
    case PC_THREAD_EN_REG0:
    case PC_THREAD_EN_REG1:
        val = xive->regs[reg];
        break;

    /*
     * XIVE hardware thread enablement
     */
    case PC_THREAD_EN_REG0_SET:
    case PC_THREAD_EN_REG0_CLR:
        val = xive->regs[PC_THREAD_EN_REG0 >> 3];
        break;
    case PC_THREAD_EN_REG1_SET:
    case PC_THREAD_EN_REG1_CLR:
        val = xive->regs[PC_THREAD_EN_REG1 >> 3];
        break;

    case CQ_MSGSND: /* Identifies which cores have msgsnd enabled. */
        val = 0xffffff0000000000;
        break;

    /*
     * XIVE PC & VC cache updates for EAS, NVT and END
     */
    case VC_EQC_CWATCH_SPEC:
        xive->regs[reg] &= ~(VC_EQC_CWATCH_FULL | VC_EQC_CWATCH_CONFLICT);
        val = xive->regs[reg];
        break;
    case VC_EQC_CWATCH_DAT0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        pnv_xive_end_cache_load(xive);
        val = xive->regs[reg];
        break;
    case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
        val = xive->regs[reg];
        break;

    case PC_VPC_CWATCH_SPEC:
        xive->regs[reg] &= ~(PC_VPC_CWATCH_FULL | PC_VPC_CWATCH_CONFLICT);
        val = xive->regs[reg];
        break;
    case PC_VPC_CWATCH_DAT0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        pnv_xive_nvt_cache_load(xive);
        val = xive->regs[reg];
        break;
    case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
        val = xive->regs[reg];
        break;

    case PC_VPC_SCRUB_TRIG:
    case VC_IVC_SCRUB_TRIG:
    case VC_EQC_SCRUB_TRIG:
        xive->regs[reg] &= ~VC_SCRUB_VALID;
        val = xive->regs[reg];
        break;

    /*
     * XIVE PC & VC cache invalidation
     */
    case PC_AT_KILL:
        xive->regs[reg] &= ~PC_AT_KILL_VALID;
        val = xive->regs[reg];
        break;
    case VC_AT_MACRO_KILL:
        xive->regs[reg] &= ~VC_KILL_VALID;
        val = xive->regs[reg];
        break;

    /*
     * XIVE synchronisation
     */
    case VC_EQC_CONFIG:
        val = VC_EQC_SYNC_MASK;
        break;

    default:
        xive_error(xive, "IC: invalid read reg=0x%"HWADDR_PRIx, offset);
    }

    return val;
}

static const MemoryRegionOps pnv_xive_ic_reg_ops = {
    .read = pnv_xive_ic_reg_read,
    .write = pnv_xive_ic_reg_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * IC - Notify MMIO port page (write only)
 */
#define PNV_XIVE_FORWARD_IPI        0x800 /* Forward IPI */
#define PNV_XIVE_FORWARD_HW         0x880 /* Forward HW */
#define PNV_XIVE_FORWARD_OS_ESC     0x900 /* Forward OS escalation */
#define PNV_XIVE_FORWARD_HW_ESC     0x980 /* Forward Hyp escalation */
#define PNV_XIVE_FORWARD_REDIS      0xa00 /* Forward Redistribution */
#define PNV_XIVE_RESERVED5          0xa80 /* Cache line 5 PowerBUS operation */
#define PNV_XIVE_RESERVED6          0xb00 /* Cache line 6 PowerBUS operation */
#define PNV_XIVE_RESERVED7          0xb80 /* Cache line 7 PowerBUS operation */

/* VC synchronisation */
#define PNV_XIVE_SYNC_IPI           0xc00 /* Sync IPI */
#define PNV_XIVE_SYNC_HW            0xc80 /* Sync HW */
#define PNV_XIVE_SYNC_OS_ESC        0xd00 /* Sync OS escalation */
#define PNV_XIVE_SYNC_HW_ESC        0xd80 /* Sync Hyp escalation */
#define PNV_XIVE_SYNC_REDIS         0xe00 /* Sync Redistribution */

/* PC synchronisation */
#define PNV_XIVE_SYNC_PULL          0xe80 /* Sync pull context */
#define PNV_XIVE_SYNC_PUSH          0xf00 /* Sync push context */
#define PNV_XIVE_SYNC_VPC           0xf80 /* Sync remove VPC store */

static void pnv_xive_ic_hw_trigger(PnvXive *xive, hwaddr addr, uint64_t val)
{
    uint8_t blk;
    uint32_t idx;

    if (val & XIVE_TRIGGER_END) {
        xive_error(xive, "IC: END trigger at @0x%"HWADDR_PRIx" data 0x%"PRIx64,
                   addr, val);
        return;
    }

    /*
     * Forward the source event notification directly to the Router.
     * The source interrupt number should already be correctly encoded
     * with the chip block id by the sending device (PHB, PSI).
     */
    blk = XIVE_EAS_BLOCK(val);
    idx = XIVE_EAS_INDEX(val);

    xive_router_notify(XIVE_NOTIFIER(xive), XIVE_EAS(blk, idx));
}
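
/*
 * Illustrative example, assuming the trigger data layout of
 * XIVE_EAS(): a PHB raising interrupt 0x30 of block 0x2 stores
 * 0x20000030 to this notify page. XIVE_TRIGGER_END is clear, so the
 * event is forwarded to the router as EAS 0x2/0x30.
 */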

static void pnv_xive_ic_notify_write(void *opaque, hwaddr addr, uint64_t val,
                                     unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    /* VC: HW triggers */
    switch (addr) {
    case 0x000 ... 0x7FF:
        pnv_xive_ic_hw_trigger(opaque, addr, val);
        break;

    /* VC: Forwarded IRQs */
    case PNV_XIVE_FORWARD_IPI:
    case PNV_XIVE_FORWARD_HW:
    case PNV_XIVE_FORWARD_OS_ESC:
    case PNV_XIVE_FORWARD_HW_ESC:
    case PNV_XIVE_FORWARD_REDIS:
        /* TODO: forwarded IRQs. Should be like HW triggers */
        xive_error(xive, "IC: forwarded at @0x%"HWADDR_PRIx" IRQ 0x%"PRIx64,
                   addr, val);
        break;

    /* VC syncs */
    case PNV_XIVE_SYNC_IPI:
    case PNV_XIVE_SYNC_HW:
    case PNV_XIVE_SYNC_OS_ESC:
    case PNV_XIVE_SYNC_HW_ESC:
    case PNV_XIVE_SYNC_REDIS:
        break;

    /* PC syncs */
    case PNV_XIVE_SYNC_PULL:
    case PNV_XIVE_SYNC_PUSH:
    case PNV_XIVE_SYNC_VPC:
        break;

    default:
        xive_error(xive, "IC: invalid notify write @%"HWADDR_PRIx, addr);
    }
}

static uint64_t pnv_xive_ic_notify_read(void *opaque, hwaddr addr,
                                        unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    /* loads are invalid */
    xive_error(xive, "IC: invalid notify read @%"HWADDR_PRIx, addr);
    return -1;
}

static const MemoryRegionOps pnv_xive_ic_notify_ops = {
    .read = pnv_xive_ic_notify_read,
    .write = pnv_xive_ic_notify_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * IC - LSI MMIO handlers (not modeled)
 */

static void pnv_xive_ic_lsi_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "IC: LSI invalid write @%"HWADDR_PRIx, addr);
}

static uint64_t pnv_xive_ic_lsi_read(void *opaque, hwaddr addr, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "IC: LSI invalid read @%"HWADDR_PRIx, addr);
    return -1;
}

static const MemoryRegionOps pnv_xive_ic_lsi_ops = {
    .read = pnv_xive_ic_lsi_read,
    .write = pnv_xive_ic_lsi_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * IC - Indirect TIMA MMIO handlers
 */

/*
 * When the TIMA is accessed from the indirect page, the thread id of
 * the target CPU is configured in the PC_TCTXT_INDIR0 register before
 * use. This is used for resets and for debug purposes.
 */
static XiveTCTX *pnv_xive_get_indirect_tctx(PnvXive *xive)
{
    PnvChip *chip = xive->chip;
    uint64_t tctxt_indir = xive->regs[PC_TCTXT_INDIR0 >> 3];
    PowerPCCPU *cpu = NULL;
    int pir;

    if (!(tctxt_indir & PC_TCTXT_INDIR_VALID)) {
        xive_error(xive, "IC: no indirect TIMA access in progress");
        return NULL;
    }

    pir = (chip->chip_id << 8) | GETFIELD(PC_TCTXT_INDIR_THRDID, tctxt_indir);
    cpu = pnv_chip_find_cpu(chip, pir);
    if (!cpu) {
        xive_error(xive, "IC: invalid PIR %x for indirect access", pir);
        return NULL;
    }

    /* Check that HW thread is XIVE enabled */
    if (!pnv_xive_is_cpu_enabled(xive, cpu)) {
        xive_error(xive, "IC: CPU %x is not enabled", pir);
    }

    return XIVE_TCTX(pnv_cpu_state(cpu)->intc);
}

static void xive_tm_indirect_write(void *opaque, hwaddr offset,
                                   uint64_t value, unsigned size)
{
    XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));

    xive_tctx_tm_write(XIVE_PRESENTER(opaque), tctx, offset, value, size);
}

static uint64_t xive_tm_indirect_read(void *opaque, hwaddr offset,
                                      unsigned size)
{
    XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));

    return xive_tctx_tm_read(XIVE_PRESENTER(opaque), tctx, offset, size);
}

static const MemoryRegionOps xive_tm_indirect_ops = {
    .read = xive_tm_indirect_read,
    .write = xive_tm_indirect_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static void pnv_xive_tm_write(void *opaque, hwaddr offset,
                              uint64_t value, unsigned size)
{
    PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
    PnvXive *xive = pnv_xive_tm_get_xive(cpu);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

    xive_tctx_tm_write(XIVE_PRESENTER(xive), tctx, offset, value, size);
}

static uint64_t pnv_xive_tm_read(void *opaque, hwaddr offset, unsigned size)
{
    PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
    PnvXive *xive = pnv_xive_tm_get_xive(cpu);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

    return xive_tctx_tm_read(XIVE_PRESENTER(xive), tctx, offset, size);
}

const MemoryRegionOps pnv_xive_tm_ops = {
    .read = pnv_xive_tm_read,
    .write = pnv_xive_tm_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

/*
 * Interrupt controller XSCOM region.
 */
static uint64_t pnv_xive_xscom_read(void *opaque, hwaddr addr, unsigned size)
{
    switch (addr >> 3) {
    case X_VC_EQC_CONFIG:
        /* FIXME (skiboot): This is the only XSCOM load. Bizarre. */
        return VC_EQC_SYNC_MASK;
    default:
        return pnv_xive_ic_reg_read(opaque, addr, size);
    }
}

static void pnv_xive_xscom_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
    pnv_xive_ic_reg_write(opaque, addr, val, size);
}

static const MemoryRegionOps pnv_xive_xscom_ops = {
    .read = pnv_xive_xscom_read,
    .write = pnv_xive_xscom_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    }
};

/*
 * Virtualization Controller MMIO region containing the IPI and END ESB pages
 */
static uint64_t pnv_xive_vc_read(void *opaque, hwaddr offset,
                                 unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
    uint64_t edt_type = 0;
    uint64_t edt_offset;
    MemTxResult result;
    AddressSpace *edt_as = NULL;
    uint64_t ret = -1;

    if (edt_index < XIVE_TABLE_EDT_MAX) {
        edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
    }

    switch (edt_type) {
    case CQ_TDR_EDT_IPI:
        edt_as = &xive->ipi_as;
        break;
    case CQ_TDR_EDT_EQ:
        edt_as = &xive->end_as;
        break;
    default:
        xive_error(xive, "VC: invalid EDT type for read @%"HWADDR_PRIx, offset);
        return -1;
    }

    /* Remap the offset for the targeted address space */
    edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);

    ret = address_space_ldq(edt_as, edt_offset, MEMTXATTRS_UNSPECIFIED,
                            &result);

    if (result != MEMTX_OK) {
        xive_error(xive, "VC: %s read failed at @0x%"HWADDR_PRIx " -> @0x%"
                   HWADDR_PRIx, edt_type == CQ_TDR_EDT_IPI ? "IPI" : "END",
                   offset, edt_offset);
        return -1;
    }

    return ret;
}

static void pnv_xive_vc_write(void *opaque, hwaddr offset,
                              uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
    uint64_t edt_type = 0;
    uint64_t edt_offset;
    MemTxResult result;
    AddressSpace *edt_as = NULL;

    if (edt_index < XIVE_TABLE_EDT_MAX) {
        edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
    }

    switch (edt_type) {
    case CQ_TDR_EDT_IPI:
        edt_as = &xive->ipi_as;
        break;
    case CQ_TDR_EDT_EQ:
        edt_as = &xive->end_as;
        break;
    default:
        xive_error(xive, "VC: invalid EDT type for write @%"HWADDR_PRIx,
                   offset);
        return;
    }

    /* Remap the offset for the targeted address space */
    edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);

    address_space_stq(edt_as, edt_offset, val, MEMTXATTRS_UNSPECIFIED, &result);
    if (result != MEMTX_OK) {
        xive_error(xive, "VC: write failed at @0x%"HWADDR_PRIx, edt_offset);
    }
}

static const MemoryRegionOps pnv_xive_vc_ops = {
    .read = pnv_xive_vc_read,
    .write = pnv_xive_vc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * Presenter Controller MMIO region. The Virtualization Controller
 * updates the IPB in the NVT table when required. Not modeled.
 */
static uint64_t pnv_xive_pc_read(void *opaque, hwaddr addr,
                                 unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "PC: invalid read @%"HWADDR_PRIx, addr);
    return -1;
}

static void pnv_xive_pc_write(void *opaque, hwaddr addr,
                              uint64_t value, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "PC: invalid write @%"HWADDR_PRIx, addr);
}

static const MemoryRegionOps pnv_xive_pc_ops = {
    .read = pnv_xive_pc_read,
    .write = pnv_xive_pc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
1662 
1663 void pnv_xive_pic_print_info(PnvXive *xive, Monitor *mon)
1664 {
1665     XiveRouter *xrtr = XIVE_ROUTER(xive);
1666     uint8_t blk = pnv_xive_block_id(xive);
1667     uint8_t chip_id = xive->chip->chip_id;
1668     uint32_t srcno0 = XIVE_EAS(blk, 0);
1669     uint32_t nr_ipis = pnv_xive_nr_ipis(xive, blk);
1670     XiveEAS eas;
1671     XiveEND end;
1672     int i;
1673 
1674     monitor_printf(mon, "XIVE[%x] #%d Source %08x .. %08x\n", chip_id, blk,
1675                    srcno0, srcno0 + nr_ipis - 1);
1676     xive_source_pic_print_info(&xive->ipi_source, srcno0, mon);
1677 
1678     monitor_printf(mon, "XIVE[%x] #%d EAT %08x .. %08x\n", chip_id, blk,
1679                    srcno0, srcno0 + nr_ipis - 1);
1680     for (i = 0; i < nr_ipis; i++) {
1681         if (xive_router_get_eas(xrtr, blk, i, &eas)) {
1682             break;
1683         }
1684         if (!xive_eas_is_masked(&eas)) {
1685             xive_eas_pic_print_info(&eas, i, mon);
1686         }
1687     }
1688 
1689     monitor_printf(mon, "XIVE[%x] #%d ENDT\n", chip_id, blk);
1690     i = 0;
1691     while (!xive_router_get_end(xrtr, blk, i, &end)) {
1692         xive_end_pic_print_info(&end, i++, mon);
1693     }
1694 
1695     monitor_printf(mon, "XIVE[%x] #%d END Escalation EAT\n", chip_id, blk);
1696     i = 0;
1697     while (!xive_router_get_end(xrtr, blk, i, &end)) {
1698         xive_end_eas_pic_print_info(&end, i++, mon);
1699     }
1700 }
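
/*
 * Illustrative monitor output for the function above (a sketch: the
 * ranges and entry counts depend entirely on the FW provisioning and
 * the values below are made up; per-entry lines are omitted):
 *
 *   XIVE[0] #0 Source 00000000 .. 00000fff
 *   XIVE[0] #0 EAT 00000000 .. 00000fff
 *   XIVE[0] #0 ENDT
 *   XIVE[0] #0 END Escalation EAT
 */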

static void pnv_xive_reset(void *dev)
{
    PnvXive *xive = PNV_XIVE(dev);
    XiveSource *xsrc = &xive->ipi_source;
    XiveENDSource *end_xsrc = &xive->end_source;

    /* Default page size (should be changed at runtime to 64k) */
    xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;

    /* Clear subregions */
    if (memory_region_is_mapped(&xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->ipi_edt_mmio, &xsrc->esb_mmio);
    }

    if (memory_region_is_mapped(&xive->ipi_edt_mmio)) {
        memory_region_del_subregion(&xive->ipi_mmio, &xive->ipi_edt_mmio);
    }

    if (memory_region_is_mapped(&end_xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->end_edt_mmio, &end_xsrc->esb_mmio);
    }

    if (memory_region_is_mapped(&xive->end_edt_mmio)) {
        memory_region_del_subregion(&xive->end_mmio, &xive->end_edt_mmio);
    }
}
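
/*
 * Note: the subregions removed at reset are re-added later, when the
 * FW reprograms the BARs through XSCOM and rewrites the EDT table
 * (see the EDT handling earlier in this file), at their newly
 * configured sizes.
 */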

static void pnv_xive_init(Object *obj)
{
    PnvXive *xive = PNV_XIVE(obj);

    object_initialize_child(obj, "ipi_source", &xive->ipi_source,
                            sizeof(xive->ipi_source), TYPE_XIVE_SOURCE,
                            &error_abort, NULL);
    object_initialize_child(obj, "end_source", &xive->end_source,
                            sizeof(xive->end_source), TYPE_XIVE_END_SOURCE,
                            &error_abort, NULL);
}

/*
 * Maximum number of IRQs and ENDs supported by HW
 */
#define PNV_XIVE_NR_IRQS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
#define PNV_XIVE_NR_ENDS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
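
/*
 * Each source or END occupies a 2-page 64K ESB footprint (a trigger
 * page and a management page, 128K in total, hence the
 * 1ull << XIVE_ESB_64K_2PAGE divisor). As a hypothetical example, a
 * 2G VC window would yield 2G / 128K = 16384 entries.
 */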

static void pnv_xive_realize(DeviceState *dev, Error **errp)
{
    PnvXive *xive = PNV_XIVE(dev);
    XiveSource *xsrc = &xive->ipi_source;
    XiveENDSource *end_xsrc = &xive->end_source;
    Error *local_err = NULL;

    assert(xive->chip);

    /*
     * The XiveSource and XiveENDSource objects are realized with the
     * maximum allowed HW configuration. The ESB MMIO regions will be
     * resized dynamically when the FW configures the controller, so
     * that resources which were not provisioned cannot be reached.
     */
    object_property_set_int(OBJECT(xsrc), PNV_XIVE_NR_IRQS, "nr-irqs",
                            &error_fatal);
    object_property_set_link(OBJECT(xsrc), OBJECT(xive), "xive",
                             &error_abort);
    object_property_set_bool(OBJECT(xsrc), true, "realized", &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    object_property_set_int(OBJECT(end_xsrc), PNV_XIVE_NR_ENDS, "nr-ends",
                            &error_fatal);
    object_property_set_link(OBJECT(end_xsrc), OBJECT(xive), "xive",
                             &error_abort);
    object_property_set_bool(OBJECT(end_xsrc), true, "realized", &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* Default page size. Generally changed at runtime to 64k */
    xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;

    /* XSCOM region, used for initial configuration of the BARs */
    memory_region_init_io(&xive->xscom_regs, OBJECT(dev), &pnv_xive_xscom_ops,
                          xive, "xscom-xive", PNV9_XSCOM_XIVE_SIZE << 3);

    /* Interrupt controller MMIO regions */
    memory_region_init(&xive->ic_mmio, OBJECT(dev), "xive-ic",
                       PNV9_XIVE_IC_SIZE);

    memory_region_init_io(&xive->ic_reg_mmio, OBJECT(dev), &pnv_xive_ic_reg_ops,
                          xive, "xive-ic-reg", 1 << xive->ic_shift);
    memory_region_init_io(&xive->ic_notify_mmio, OBJECT(dev),
                          &pnv_xive_ic_notify_ops,
                          xive, "xive-ic-notify", 1 << xive->ic_shift);

    /* The Pervasive LSI trigger and EOI pages (not modeled) */
    memory_region_init_io(&xive->ic_lsi_mmio, OBJECT(dev), &pnv_xive_ic_lsi_ops,
                          xive, "xive-ic-lsi", 2 << xive->ic_shift);

    /* Thread Interrupt Management Area (Indirect) */
    memory_region_init_io(&xive->tm_indirect_mmio, OBJECT(dev),
                          &xive_tm_indirect_ops,
                          xive, "xive-tima-indirect", PNV9_XIVE_TM_SIZE);
    /*
     * Overall Virtualization Controller MMIO region containing the
     * IPI ESB pages and END ESB pages. The layout is defined by the
     * EDT "Domain table" and the accesses are dispatched using
     * address spaces for each.
     */
    memory_region_init_io(&xive->vc_mmio, OBJECT(xive), &pnv_xive_vc_ops, xive,
                          "xive-vc", PNV9_XIVE_VC_SIZE);

    memory_region_init(&xive->ipi_mmio, OBJECT(xive), "xive-vc-ipi",
                       PNV9_XIVE_VC_SIZE);
    address_space_init(&xive->ipi_as, &xive->ipi_mmio, "xive-vc-ipi");
    memory_region_init(&xive->end_mmio, OBJECT(xive), "xive-vc-end",
                       PNV9_XIVE_VC_SIZE);
    address_space_init(&xive->end_as, &xive->end_mmio, "xive-vc-end");

    /*
     * The MMIO windows exposing the IPI ESBs and the END ESBs in the
     * VC region. Their size is configured by the FW in the EDT table.
     */
    memory_region_init(&xive->ipi_edt_mmio, OBJECT(xive), "xive-vc-ipi-edt", 0);
    memory_region_init(&xive->end_edt_mmio, OBJECT(xive), "xive-vc-end-edt", 0);
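
    /*
     * The two EDT windows start with a zero size on purpose: nothing
     * is reachable until the FW programs the EDT. A minimal sketch of
     * the later resize, assuming sizes computed from the EDT table
     * (ipi_edt_size is an illustrative name):
     *
     *     memory_region_set_size(&xive->ipi_edt_mmio, ipi_edt_size);
     *     memory_region_add_subregion(&xive->ipi_mmio, 0,
     *                                 &xive->ipi_edt_mmio);
     */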

    /* Presenter Controller MMIO region (not modeled) */
    memory_region_init_io(&xive->pc_mmio, OBJECT(xive), &pnv_xive_pc_ops, xive,
                          "xive-pc", PNV9_XIVE_PC_SIZE);

    /* Thread Interrupt Management Area (Direct) */
    memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &pnv_xive_tm_ops,
                          xive, "xive-tima", PNV9_XIVE_TM_SIZE);

    qemu_register_reset(pnv_xive_reset, dev);
}

static int pnv_xive_dt_xscom(PnvXScomInterface *dev, void *fdt,
                             int xscom_offset)
{
    const char compat[] = "ibm,power9-xive-x";
    char *name;
    int offset;
    uint32_t lpc_pcba = PNV9_XSCOM_XIVE_BASE;
    uint32_t reg[] = {
        cpu_to_be32(lpc_pcba),
        cpu_to_be32(PNV9_XSCOM_XIVE_SIZE)
    };

    name = g_strdup_printf("xive@%x", lpc_pcba);
    offset = fdt_add_subnode(fdt, xscom_offset, name);
    _FDT(offset);
    g_free(name);

    _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg))));
    _FDT((fdt_setprop(fdt, offset, "compatible", compat,
                      sizeof(compat))));
    return 0;
}
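
/*
 * The function above produces a node under the chip's XSCOM node
 * along these lines (a sketch: the unit address and size come from
 * PNV9_XSCOM_XIVE_BASE/SIZE, the values shown are illustrative):
 *
 *   xive@5013000 {
 *       reg = <0x5013000 0x300>;
 *       compatible = "ibm,power9-xive-x";
 *   };
 */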

static Property pnv_xive_properties[] = {
    DEFINE_PROP_UINT64("ic-bar", PnvXive, ic_base, 0),
    DEFINE_PROP_UINT64("vc-bar", PnvXive, vc_base, 0),
    DEFINE_PROP_UINT64("pc-bar", PnvXive, pc_base, 0),
    DEFINE_PROP_UINT64("tm-bar", PnvXive, tm_base, 0),
    /* The PnvChip id identifies the XIVE interrupt controller. */
    DEFINE_PROP_LINK("chip", PnvXive, chip, TYPE_PNV_CHIP, PnvChip *),
    DEFINE_PROP_END_OF_LIST(),
};
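
/*
 * A minimal sketch of how a chip model could wire these properties
 * before realizing the device (ic_base_addr is a hypothetical,
 * chip-dependent MMIO address; the property names match the list
 * above):
 *
 *     object_property_set_int(OBJECT(xive), ic_base_addr, "ic-bar",
 *                             &error_fatal);
 *     object_property_set_link(OBJECT(xive), OBJECT(chip), "chip",
 *                              &error_abort);
 */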

static void pnv_xive_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass);
    XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass);
    XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);
    XivePresenterClass *xpc = XIVE_PRESENTER_CLASS(klass);

    xdc->dt_xscom = pnv_xive_dt_xscom;

    dc->desc = "PowerNV XIVE Interrupt Controller";
    dc->realize = pnv_xive_realize;
    dc->props = pnv_xive_properties;

    xrc->get_eas = pnv_xive_get_eas;
    xrc->get_end = pnv_xive_get_end;
    xrc->write_end = pnv_xive_write_end;
    xrc->get_nvt = pnv_xive_get_nvt;
    xrc->write_nvt = pnv_xive_write_nvt;

    xnc->notify = pnv_xive_notify;
    xpc->match_nvt = pnv_xive_match_nvt;
}

static const TypeInfo pnv_xive_info = {
    .name          = TYPE_PNV_XIVE,
    .parent        = TYPE_XIVE_ROUTER,
    .instance_init = pnv_xive_init,
    .instance_size = sizeof(PnvXive),
    .class_init    = pnv_xive_class_init,
    .interfaces    = (InterfaceInfo[]) {
        { TYPE_PNV_XSCOM_INTERFACE },
        { }
    }
};

static void pnv_xive_register_types(void)
{
    type_register_static(&pnv_xive_info);
}

type_init(pnv_xive_register_types)