xref: /openbmc/qemu/hw/intc/pnv_xive2.c (revision 96c674bf)
1 /*
2  * QEMU PowerPC XIVE2 interrupt controller model  (POWER10)
3  *
4  * Copyright (c) 2019-2022, IBM Corporation.
5  *
6  * This code is licensed under the GPL version 2 or later. See the
7  * COPYING file in the top-level directory.
8  */
9 
10 #include "qemu/osdep.h"
11 #include "qemu/log.h"
12 #include "qapi/error.h"
13 #include "target/ppc/cpu.h"
14 #include "sysemu/cpus.h"
15 #include "sysemu/dma.h"
16 #include "hw/ppc/fdt.h"
17 #include "hw/ppc/pnv.h"
18 #include "hw/ppc/pnv_chip.h"
19 #include "hw/ppc/pnv_core.h"
20 #include "hw/ppc/pnv_xscom.h"
21 #include "hw/ppc/xive2.h"
22 #include "hw/ppc/pnv_xive.h"
23 #include "hw/ppc/xive_regs.h"
24 #include "hw/ppc/xive2_regs.h"
25 #include "hw/ppc/ppc.h"
26 #include "hw/qdev-properties.h"
27 #include "sysemu/reset.h"
28 #include "sysemu/qtest.h"
29 
30 #include <libfdt.h>
31 
32 #include "pnv_xive2_regs.h"
33 
34 #undef XIVE2_DEBUG
35 
36 /* XIVE Sync or Flush Notification Block */
37 typedef struct XiveSfnBlock {
38     uint8_t bytes[32];
39 } XiveSfnBlock;
40 
41 /* XIVE Thread Sync or Flush Notification Area */
42 typedef struct XiveThreadNA {
43     XiveSfnBlock topo[16];
44 } XiveThreadNA;
45 
46 /*
47  * Virtual structures table (VST)
48  */
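/* Each ESB entry is a 2-bit PQ state, so one byte backs 4 entries */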
49 #define SBE_PER_BYTE   4
50 
51 typedef struct XiveVstInfo {
52     const char *name;
53     uint32_t    size;
54     uint32_t    max_blocks;
55 } XiveVstInfo;
56 
57 static const XiveVstInfo vst_infos[] = {
58 
59     [VST_EAS]  = { "EAT",  sizeof(Xive2Eas),     16 },
60     [VST_ESB]  = { "ESB",  1,                    16 },
61     [VST_END]  = { "ENDT", sizeof(Xive2End),     16 },
62 
63     [VST_NVP]  = { "NVPT", sizeof(Xive2Nvp),     16 },
64     [VST_NVG]  = { "NVGT", sizeof(Xive2Nvgc),    16 },
65     [VST_NVC]  = { "NVCT", sizeof(Xive2Nvgc),    16 },
66 
67     [VST_IC]  =  { "IC",   1, /* ? */            16 }, /* Topology # */
68     [VST_SYNC] = { "SYNC", sizeof(XiveThreadNA), 16 }, /* Topology # */
69 
70     /*
71      * This table contains the backing store pages for the interrupt
72      * fifos of the VC sub-engine in case of overflow.
73      *
74      * 0 - IPI,
75      * 1 - HWD,
76      * 2 - NxC,
77      * 3 - INT,
78      * 4 - OS-Queue,
79      * 5 - Pool-Queue,
80      * 6 - Hard-Queue
81      */
82     [VST_ERQ]  = { "ERQ",  1,                   VC_QUEUE_COUNT },
83 };
84 
85 #define xive2_error(xive, fmt, ...)                                      \
86     qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n",              \
87                   (xive)->chip->chip_id, ## __VA_ARGS__)
88 
89 /*
90  * TODO: Document block id override
91  */
92 static uint32_t pnv_xive2_block_id(PnvXive2 *xive)
93 {
94     uint8_t blk = xive->chip->chip_id;
95     uint64_t cfg_val = xive->cq_regs[CQ_XIVE_CFG >> 3];
96 
97     if (cfg_val & CQ_XIVE_CFG_HYP_HARD_BLKID_OVERRIDE) {
98         blk = GETFIELD(CQ_XIVE_CFG_HYP_HARD_BLOCK_ID, cfg_val);
99     }
100 
101     return blk;
102 }
103 
104 /*
105  * Remote access to controllers. HW uses MMIOs. For now, a simple scan
106  * of the chips is good enough.
107  *
108  * TODO: Block scope support
109  */
110 static PnvXive2 *pnv_xive2_get_remote(uint8_t blk)
111 {
112     PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine());
113     int i;
114 
115     for (i = 0; i < pnv->num_chips; i++) {
116         Pnv10Chip *chip10 = PNV10_CHIP(pnv->chips[i]);
117         PnvXive2 *xive = &chip10->xive;
118 
119         if (pnv_xive2_block_id(xive) == blk) {
120             return xive;
121         }
122     }
123     return NULL;
124 }
125 
126 /*
127  * VST accessors for ESB, EAT, ENDT, NVP
128  *
129  * Indirect VST tables are arrays of VSDs pointing to a page (of the
130  * same size). Each page is a direct VST table.
131  */
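/*
 * Worked example (illustrative numbers): with a 64k indirect page
 * (page_shift = 16) and 32-byte END entries, vst_per_page is
 * 65536 / 32 = 2048. Looking up idx 5000 reads the VSD at
 * vsd_addr + (5000 / 2048) * XIVE_VSD_SIZE and then resolves entry
 * 5000 % 2048 = 904 within the direct page it points to.
 */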
132 
133 #define XIVE_VSD_SIZE 8
134 
135 /* Indirect page size can be 4K, 64K, 2M, 16M. */
136 static uint64_t pnv_xive2_vst_page_size_allowed(uint32_t page_shift)
137 {
138      return page_shift == 12 || page_shift == 16 ||
139          page_shift == 21 || page_shift == 24;
140 }
141 
142 static uint64_t pnv_xive2_vst_addr_direct(PnvXive2 *xive, uint32_t type,
143                                           uint64_t vsd, uint32_t idx)
144 {
145     const XiveVstInfo *info = &vst_infos[type];
146     uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
147     uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);
148     uint32_t idx_max;
149 
150     idx_max = vst_tsize / info->size - 1;
151     if (idx > idx_max) {
152 #ifdef XIVE2_DEBUG
153         xive2_error(xive, "VST: %s entry %x out of range [ 0 .. %x ] !?",
154                    info->name, idx, idx_max);
155 #endif
156         return 0;
157     }
158 
159     return vst_addr + idx * info->size;
160 }
161 
162 static uint64_t pnv_xive2_vst_addr_indirect(PnvXive2 *xive, uint32_t type,
163                                             uint64_t vsd, uint32_t idx)
164 {
165     const XiveVstInfo *info = &vst_infos[type];
166     uint64_t vsd_addr;
167     uint32_t vsd_idx;
168     uint32_t page_shift;
169     uint32_t vst_per_page;
170 
171     /* Get the page size of the indirect table. */
172     vsd_addr = vsd & VSD_ADDRESS_MASK;
173     ldq_be_dma(&address_space_memory, vsd_addr, &vsd, MEMTXATTRS_UNSPECIFIED);
174 
175     if (!(vsd & VSD_ADDRESS_MASK)) {
176 #ifdef XIVE2_DEBUG
177         xive2_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
178 #endif
179         return 0;
180     }
181 
182     page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
183 
184     if (!pnv_xive2_vst_page_size_allowed(page_shift)) {
185         xive2_error(xive, "VST: invalid %s page shift %d", info->name,
186                    page_shift);
187         return 0;
188     }
189 
190     vst_per_page = (1ull << page_shift) / info->size;
191     vsd_idx = idx / vst_per_page;
192 
193     /* Load the VSD we are looking for, if not already done */
194     if (vsd_idx) {
195         vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE;
196         ldq_be_dma(&address_space_memory, vsd_addr, &vsd,
197                    MEMTXATTRS_UNSPECIFIED);
198 
199         if (!(vsd & VSD_ADDRESS_MASK)) {
200 #ifdef XIVE2_DEBUG
201             xive2_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
202 #endif
203             return 0;
204         }
205 
206         /*
207          * Check that the pages have a consistent size across the
208          * indirect table
209          */
210         if (page_shift != GETFIELD(VSD_TSIZE, vsd) + 12) {
211             xive2_error(xive, "VST: %s entry %x indirect page size differ !?",
212                        info->name, idx);
213             return 0;
214         }
215     }
216 
217     return pnv_xive2_vst_addr_direct(xive, type, vsd, (idx % vst_per_page));
218 }
219 
220 static uint8_t pnv_xive2_nvc_table_compress_shift(PnvXive2 *xive)
221 {
222     uint8_t shift =  GETFIELD(PC_NXC_PROC_CONFIG_NVC_TABLE_COMPRESS,
223                               xive->pc_regs[PC_NXC_PROC_CONFIG >> 3]);
224     return shift > 8 ? 0 : shift;
225 }
226 
227 static uint8_t pnv_xive2_nvg_table_compress_shift(PnvXive2 *xive)
228 {
229     uint8_t shift = GETFIELD(PC_NXC_PROC_CONFIG_NVG_TABLE_COMPRESS,
230                              xive->pc_regs[PC_NXC_PROC_CONFIG >> 3]);
231     return shift > 8 ? 0 : shift;
232 }
233 
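/*
 * The NVG/NVC compress shift scales indexes before the lookup:
 * pnv_xive2_vst_addr() right-shifts idx by the configured amount, so
 * firmware can back indexes spaced 2^n apart with a 2^n-times smaller
 * table (assumed interpretation of the compress configuration).
 */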
234 static uint64_t pnv_xive2_vst_addr(PnvXive2 *xive, uint32_t type, uint8_t blk,
235                                    uint32_t idx)
236 {
237     const XiveVstInfo *info = &vst_infos[type];
238     uint64_t vsd;
239 
240     if (blk >= info->max_blocks) {
241         xive2_error(xive, "VST: invalid block id %d for VST %s %d !?",
242                    blk, info->name, idx);
243         return 0;
244     }
245 
246     vsd = xive->vsds[type][blk];
247     if (vsd == 0) {
248         xive2_error(xive, "VST: vsd == 0 block id %d for VST %s %d !?",
249                    blk, info->name, idx);
250         return 0;
251     }
252 
253     /* Remote VST access */
254     if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) {
255         xive = pnv_xive2_get_remote(blk);
256 
257         return xive ? pnv_xive2_vst_addr(xive, type, blk, idx) : 0;
258     }
259 
260     if (type == VST_NVG) {
261         idx >>= pnv_xive2_nvg_table_compress_shift(xive);
262     } else if (type == VST_NVC) {
263         idx >>= pnv_xive2_nvc_table_compress_shift(xive);
264     }
265 
266     if (VSD_INDIRECT & vsd) {
267         return pnv_xive2_vst_addr_indirect(xive, type, vsd, idx);
268     }
269 
270     return pnv_xive2_vst_addr_direct(xive, type, vsd, idx);
271 }
272 
273 static int pnv_xive2_vst_read(PnvXive2 *xive, uint32_t type, uint8_t blk,
274                              uint32_t idx, void *data)
275 {
276     const XiveVstInfo *info = &vst_infos[type];
277     uint64_t addr = pnv_xive2_vst_addr(xive, type, blk, idx);
278     MemTxResult result;
279 
280     if (!addr) {
281         return -1;
282     }
283 
284     result = address_space_read(&address_space_memory, addr,
285                                 MEMTXATTRS_UNSPECIFIED, data,
286                                 info->size);
287     if (result != MEMTX_OK) {
288         xive2_error(xive, "VST: read failed at @0x%" HWADDR_PRIx
289                    " for VST %s %x/%x", addr, info->name, blk, idx);
290         return -1;
291     }
292     return 0;
293 }
294 
295 #define XIVE_VST_WORD_ALL -1
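/*
 * pnv_xive2_vst_write() stores the full entry when word_number is
 * XIVE_VST_WORD_ALL; otherwise only the selected 4-byte word is
 * written, e.g. word 2 of an END entry updates bytes 8..11.
 */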
296 
297 static int pnv_xive2_vst_write(PnvXive2 *xive, uint32_t type, uint8_t blk,
298                                uint32_t idx, void *data, uint32_t word_number)
299 {
300     const XiveVstInfo *info = &vst_infos[type];
301     uint64_t addr = pnv_xive2_vst_addr(xive, type, blk, idx);
302     MemTxResult result;
303 
304     if (!addr) {
305         return -1;
306     }
307 
308     if (word_number == XIVE_VST_WORD_ALL) {
309         result = address_space_write(&address_space_memory, addr,
310                                      MEMTXATTRS_UNSPECIFIED, data,
311                                      info->size);
312     } else {
313         result = address_space_write(&address_space_memory,
314                                      addr + word_number * 4,
315                                      MEMTXATTRS_UNSPECIFIED,
316                                      data + word_number * 4, 4);
317     }
318 
319     if (result != MEMTX_OK) {
320         xive2_error(xive, "VST: write failed at @0x%" HWADDR_PRIx
321                    " for VST %s %x/%x", addr, info->name, blk, idx);
322         return -1;
323     }
324     return 0;
325 }
326 
327 static int pnv_xive2_get_pq(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
328                              uint8_t *pq)
329 {
330     PnvXive2 *xive = PNV_XIVE2(xrtr);
331 
332     if (pnv_xive2_block_id(xive) != blk) {
333         xive2_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
334         return -1;
335     }
336 
337     *pq = xive_source_esb_get(&xive->ipi_source, idx);
338     return 0;
339 }
340 
341 static int pnv_xive2_set_pq(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
342                              uint8_t *pq)
343 {
344     PnvXive2 *xive = PNV_XIVE2(xrtr);
345 
346     if (pnv_xive2_block_id(xive) != blk) {
347         xive2_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
348         return -1;
349     }
350 
351     *pq = xive_source_esb_set(&xive->ipi_source, idx, *pq);
352     return 0;
353 }
354 
355 static int pnv_xive2_get_end(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
356                              Xive2End *end)
357 {
358     return pnv_xive2_vst_read(PNV_XIVE2(xrtr), VST_END, blk, idx, end);
359 }
360 
361 static int pnv_xive2_write_end(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
362                                Xive2End *end, uint8_t word_number)
363 {
364     return pnv_xive2_vst_write(PNV_XIVE2(xrtr), VST_END, blk, idx, end,
365                               word_number);
366 }
367 
368 static inline int pnv_xive2_get_current_pir(PnvXive2 *xive)
369 {
370     if (!qtest_enabled()) {
371         PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
372         return ppc_cpu_pir(cpu);
373     }
374     return 0;
375 }
376 
377 /*
378  * After SW injects a Queue Sync or Cache Flush operation, HW will notify
379  * SW of the completion of the operation by writing a byte of all 1's (0xff)
380  * to a specific memory location.  The memory location is calculated by first
381  * looking up a base address in the SYNC VSD using the Topology ID of the
382  * originating thread as the "block" number.  This points to a
383  * 64k block of memory that is further divided into 128 512-byte chunks,
384  * which are indexed by the thread id of the requesting thread.
385  * Finally, each 512-byte chunk is divided into 16 32-byte chunks,
386  * which are indexed by the topology id of the targeted IC's chip.
387  * The values below are the offsets into that 32-byte chunk of memory for
388  * each type of cache flush or queue sync operation.
389  */
390 #define PNV_XIVE2_QUEUE_IPI              0x00
391 #define PNV_XIVE2_QUEUE_HW               0x01
392 #define PNV_XIVE2_QUEUE_NXC              0x02
393 #define PNV_XIVE2_QUEUE_INT              0x03
394 #define PNV_XIVE2_QUEUE_OS               0x04
395 #define PNV_XIVE2_QUEUE_POOL             0x05
396 #define PNV_XIVE2_QUEUE_HARD             0x06
397 #define PNV_XIVE2_CACHE_ENDC             0x08
398 #define PNV_XIVE2_CACHE_ESBC             0x09
399 #define PNV_XIVE2_CACHE_EASC             0x0a
400 #define PNV_XIVE2_QUEUE_NXC_LD_LCL_NCO   0x10
401 #define PNV_XIVE2_QUEUE_NXC_LD_LCL_CO    0x11
402 #define PNV_XIVE2_QUEUE_NXC_ST_LCL_NCI   0x12
403 #define PNV_XIVE2_QUEUE_NXC_ST_LCL_CI    0x13
404 #define PNV_XIVE2_QUEUE_NXC_ST_RMT_NCI   0x14
405 #define PNV_XIVE2_QUEUE_NXC_ST_RMT_CI    0x15
406 #define PNV_XIVE2_CACHE_NXC              0x18
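/*
 * Illustrative example: a Queue Sync injected for the OS queue by a
 * thread stores 0xff at
 *   pnv_xive2_vst_addr(xive, VST_SYNC, thread_topo_id, thread_nr)
 *     + ic_topo_id * sizeof(XiveSfnBlock) + PNV_XIVE2_QUEUE_OS
 * as computed by pnv_xive2_inject_notify() below.
 */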
407 
408 static int pnv_xive2_inject_notify(PnvXive2 *xive, int type)
409 {
410     uint64_t addr;
411     int pir = pnv_xive2_get_current_pir(xive);
412     int thread_nr = PNV10_PIR2THREAD(pir);
413     int thread_topo_id = PNV10_PIR2CHIP(pir);
414     int ic_topo_id = xive->chip->chip_id;
415     uint64_t offset = ic_topo_id * sizeof(XiveSfnBlock);
416     uint8_t byte = 0xff;
417     MemTxResult result;
418 
419     /* Retrieve the address of requesting thread's notification area */
420     addr = pnv_xive2_vst_addr(xive, VST_SYNC, thread_topo_id, thread_nr);
421 
422     if (!addr) {
423         xive2_error(xive, "VST: no SYNC entry %x/%x !?",
424                     thread_topo_id, thread_nr);
425         return -1;
426     }
427 
428     address_space_stb(&address_space_memory, addr + offset + type, byte,
429                       MEMTXATTRS_UNSPECIFIED, &result);
430     assert(result == MEMTX_OK);
431 
432     return 0;
433 }
434 
435 static int pnv_xive2_end_update(PnvXive2 *xive, uint8_t watch_engine)
436 {
437     uint8_t  blk;
438     uint32_t idx;
439     int i, spec_reg, data_reg;
440     uint64_t endc_watch[4];
441 
442     assert(watch_engine < ARRAY_SIZE(endc_watch));
443 
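    /* Each watch engine owns a 0x40-byte register block (SPEC, DATA0-3) */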
444     spec_reg = (VC_ENDC_WATCH0_SPEC + watch_engine * 0x40) >> 3;
445     data_reg = (VC_ENDC_WATCH0_DATA0 + watch_engine * 0x40) >> 3;
446     blk = GETFIELD(VC_ENDC_WATCH_BLOCK_ID, xive->vc_regs[spec_reg]);
447     idx = GETFIELD(VC_ENDC_WATCH_INDEX, xive->vc_regs[spec_reg]);
448 
449     for (i = 0; i < ARRAY_SIZE(endc_watch); i++) {
450         endc_watch[i] = cpu_to_be64(xive->vc_regs[data_reg + i]);
451     }
452 
453     return pnv_xive2_vst_write(xive, VST_END, blk, idx, endc_watch,
454                               XIVE_VST_WORD_ALL);
455 }
456 
457 static void pnv_xive2_end_cache_load(PnvXive2 *xive, uint8_t watch_engine)
458 {
459     uint8_t  blk;
460     uint32_t idx;
461     uint64_t endc_watch[4] = { 0 };
462     int i, spec_reg, data_reg;
463 
464     assert(watch_engine < ARRAY_SIZE(endc_watch));
465 
466     spec_reg = (VC_ENDC_WATCH0_SPEC + watch_engine * 0x40) >> 3;
467     data_reg = (VC_ENDC_WATCH0_DATA0 + watch_engine * 0x40) >> 3;
468     blk = GETFIELD(VC_ENDC_WATCH_BLOCK_ID, xive->vc_regs[spec_reg]);
469     idx = GETFIELD(VC_ENDC_WATCH_INDEX, xive->vc_regs[spec_reg]);
470 
471     if (pnv_xive2_vst_read(xive, VST_END, blk, idx, endc_watch)) {
472         xive2_error(xive, "VST: no END entry %x/%x !?", blk, idx);
473     }
474 
475     for (i = 0; i < ARRAY_SIZE(endc_watch); i++) {
476         xive->vc_regs[data_reg + i] = be64_to_cpu(endc_watch[i]);
477     }
478 }
479 
480 static int pnv_xive2_get_nvp(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
481                              Xive2Nvp *nvp)
482 {
483     return pnv_xive2_vst_read(PNV_XIVE2(xrtr), VST_NVP, blk, idx, nvp);
484 }
485 
486 static int pnv_xive2_write_nvp(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
487                                Xive2Nvp *nvp, uint8_t word_number)
488 {
489     return pnv_xive2_vst_write(PNV_XIVE2(xrtr), VST_NVP, blk, idx, nvp,
490                               word_number);
491 }
492 
493 static int pnv_xive2_nxc_to_table_type(uint8_t nxc_type, uint32_t *table_type)
494 {
495     switch (nxc_type) {
496     case PC_NXC_WATCH_NXC_NVP:
497         *table_type = VST_NVP;
498         break;
499     case PC_NXC_WATCH_NXC_NVG:
500         *table_type = VST_NVG;
501         break;
502     case PC_NXC_WATCH_NXC_NVC:
503         *table_type = VST_NVC;
504         break;
505     default:
506         qemu_log_mask(LOG_GUEST_ERROR,
507                       "XIVE: invalid table type for nxc operation\n");
508         return -1;
509     }
510     return 0;
511 }
512 
513 static int pnv_xive2_nxc_update(PnvXive2 *xive, uint8_t watch_engine)
514 {
515     uint8_t  blk, nxc_type;
516     uint32_t idx, table_type = -1;
517     int i, spec_reg, data_reg;
518     uint64_t nxc_watch[4];
519 
520     assert(watch_engine < ARRAY_SIZE(nxc_watch));
521 
522     spec_reg = (PC_NXC_WATCH0_SPEC + watch_engine * 0x40) >> 3;
523     data_reg = (PC_NXC_WATCH0_DATA0 + watch_engine * 0x40) >> 3;
524     nxc_type = GETFIELD(PC_NXC_WATCH_NXC_TYPE, xive->pc_regs[spec_reg]);
525     blk = GETFIELD(PC_NXC_WATCH_BLOCK_ID, xive->pc_regs[spec_reg]);
526     idx = GETFIELD(PC_NXC_WATCH_INDEX, xive->pc_regs[spec_reg]);
527 
528     assert(!pnv_xive2_nxc_to_table_type(nxc_type, &table_type));
529 
530     for (i = 0; i < ARRAY_SIZE(nxc_watch); i++) {
531         nxc_watch[i] = cpu_to_be64(xive->pc_regs[data_reg + i]);
532     }
533 
534     return pnv_xive2_vst_write(xive, table_type, blk, idx, nxc_watch,
535                               XIVE_VST_WORD_ALL);
536 }
537 
538 static void pnv_xive2_nxc_cache_load(PnvXive2 *xive, uint8_t watch_engine)
539 {
540     uint8_t  blk, nxc_type;
541     uint32_t idx, table_type = -1;
542     uint64_t nxc_watch[4] = { 0 };
543     int i, spec_reg, data_reg;
544 
545     assert(watch_engine < ARRAY_SIZE(nxc_watch));
546 
547     spec_reg = (PC_NXC_WATCH0_SPEC + watch_engine * 0x40) >> 3;
548     data_reg = (PC_NXC_WATCH0_DATA0 + watch_engine * 0x40) >> 3;
549     nxc_type = GETFIELD(PC_NXC_WATCH_NXC_TYPE, xive->pc_regs[spec_reg]);
550     blk = GETFIELD(PC_NXC_WATCH_BLOCK_ID, xive->pc_regs[spec_reg]);
551     idx = GETFIELD(PC_NXC_WATCH_INDEX, xive->pc_regs[spec_reg]);
552 
553     assert(!pnv_xive2_nxc_to_table_type(nxc_type, &table_type));
554 
555     if (pnv_xive2_vst_read(xive, table_type, blk, idx, nxc_watch)) {
556         xive2_error(xive, "VST: no NXC entry %x/%x in %s table!?",
557                     blk, idx, vst_infos[table_type].name);
558     }
559 
560     for (i = 0; i < ARRAY_SIZE(nxc_watch); i++) {
561         xive->pc_regs[data_reg + i] = be64_to_cpu(nxc_watch[i]);
562     }
563 }
564 
565 static int pnv_xive2_get_eas(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
566                             Xive2Eas *eas)
567 {
568     PnvXive2 *xive = PNV_XIVE2(xrtr);
569 
570     if (pnv_xive2_block_id(xive) != blk) {
571         xive2_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
572         return -1;
573     }
574 
575     return pnv_xive2_vst_read(xive, VST_EAS, blk, idx, eas);
576 }
577 
578 static uint32_t pnv_xive2_get_config(Xive2Router *xrtr)
579 {
580     PnvXive2 *xive = PNV_XIVE2(xrtr);
581     uint32_t cfg = 0;
582 
583     if (xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS) {
584         cfg |= XIVE2_GEN1_TIMA_OS;
585     }
586 
587     if (xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_EN_VP_SAVE_RESTORE) {
588         cfg |= XIVE2_VP_SAVE_RESTORE;
589     }
590 
591     if (GETFIELD(CQ_XIVE_CFG_HYP_HARD_RANGE,
592               xive->cq_regs[CQ_XIVE_CFG >> 3]) == CQ_XIVE_CFG_THREADID_8BITS) {
593         cfg |= XIVE2_THREADID_8BITS;
594     }
595 
596     return cfg;
597 }
598 
599 static bool pnv_xive2_is_cpu_enabled(PnvXive2 *xive, PowerPCCPU *cpu)
600 {
601     int pir = ppc_cpu_pir(cpu);
602     uint32_t fc = PNV10_PIR2FUSEDCORE(pir);
603     uint64_t reg = fc < 8 ? TCTXT_EN0 : TCTXT_EN1;
604     uint32_t bit = pir & 0x3f;
605 
606     return xive->tctxt_regs[reg >> 3] & PPC_BIT(bit);
607 }
608 
609 static int pnv_xive2_match_nvt(XivePresenter *xptr, uint8_t format,
610                                uint8_t nvt_blk, uint32_t nvt_idx,
611                                bool cam_ignore, uint8_t priority,
612                                uint32_t logic_serv, XiveTCTXMatch *match)
613 {
614     PnvXive2 *xive = PNV_XIVE2(xptr);
615     PnvChip *chip = xive->chip;
616     int count = 0;
617     int i, j;
618     bool gen1_tima_os =
619         xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS;
620 
621     for (i = 0; i < chip->nr_cores; i++) {
622         PnvCore *pc = chip->cores[i];
623         CPUCore *cc = CPU_CORE(pc);
624 
625         for (j = 0; j < cc->nr_threads; j++) {
626             PowerPCCPU *cpu = pc->threads[j];
627             XiveTCTX *tctx;
628             int ring;
629 
630             if (!pnv_xive2_is_cpu_enabled(xive, cpu)) {
631                 continue;
632             }
633 
634             tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
635 
636             if (gen1_tima_os) {
637                 ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk,
638                                                  nvt_idx, cam_ignore,
639                                                  logic_serv);
640             } else {
641                 ring = xive2_presenter_tctx_match(xptr, tctx, format, nvt_blk,
642                                                    nvt_idx, cam_ignore,
643                                                    logic_serv);
644             }
645 
646             /*
647              * Save the context and keep scanning to catch duplicates,
648              * which we don't support yet.
649              */
650             if (ring != -1) {
651                 if (match->tctx) {
652                     qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a "
653                                   "thread context NVT %x/%x\n",
654                                   nvt_blk, nvt_idx);
655                     return false;
656                 }
657 
658                 match->ring = ring;
659                 match->tctx = tctx;
660                 count++;
661             }
662         }
663     }
664 
665     return count;
666 }
667 
668 static uint32_t pnv_xive2_presenter_get_config(XivePresenter *xptr)
669 {
670     PnvXive2 *xive = PNV_XIVE2(xptr);
671     uint32_t cfg = 0;
672 
673     if (xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS) {
674         cfg |= XIVE_PRESENTER_GEN1_TIMA_OS;
675     }
676     return cfg;
677 }
678 
679 static uint8_t pnv_xive2_get_block_id(Xive2Router *xrtr)
680 {
681     return pnv_xive2_block_id(PNV_XIVE2(xrtr));
682 }
683 
684 /*
685  * The TIMA MMIO space is shared among the chips. To identify the
686  * chip from which the access is being done, we extract the chip id
687  * from the PIR.
688  */
689 static PnvXive2 *pnv_xive2_tm_get_xive(PowerPCCPU *cpu)
690 {
691     int pir = ppc_cpu_pir(cpu);
692     XivePresenter *xptr = XIVE_TCTX(pnv_cpu_state(cpu)->intc)->xptr;
693     PnvXive2 *xive = PNV_XIVE2(xptr);
694 
695     if (!pnv_xive2_is_cpu_enabled(xive, cpu)) {
696         xive2_error(xive, "IC: CPU %x is not enabled", pir);
697     }
698     return xive;
699 }
700 
701 /*
702  * The internal sources of the interrupt controller have no knowledge
703  * of the XIVE2 chip on which they reside. Encode the block id in the
704  * source interrupt number before forwarding the source event
705  * notification to the Router. This is required on a multichip system.
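 *
 * For example, source number 0x10 on block 2 is forwarded as EAS
 * number XIVE_EAS(2, 0x10), i.e. the block ends up in the upper bits
 * of the 32-bit interrupt number (assuming the standard XIVE_EAS()
 * encoding with a 4-bit block field).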
706  */
707 static void pnv_xive2_notify(XiveNotifier *xn, uint32_t srcno, bool pq_checked)
708 {
709     PnvXive2 *xive = PNV_XIVE2(xn);
710     uint8_t blk = pnv_xive2_block_id(xive);
711 
712     xive2_router_notify(xn, XIVE_EAS(blk, srcno), pq_checked);
713 }
714 
715 /*
716  * Set Translation Tables
717  *
718  * TODO add support for multiple sets
719  */
720 static int pnv_xive2_stt_set_data(PnvXive2 *xive, uint64_t val)
721 {
722     uint8_t tsel = GETFIELD(CQ_TAR_SELECT, xive->cq_regs[CQ_TAR >> 3]);
723     uint8_t entry = GETFIELD(CQ_TAR_ENTRY_SELECT,
724                                   xive->cq_regs[CQ_TAR >> 3]);
725 
726     switch (tsel) {
727     case CQ_TAR_NVPG:
728     case CQ_TAR_ESB:
729     case CQ_TAR_END:
730     case CQ_TAR_NVC:
731         xive->tables[tsel][entry] = val;
732         break;
733     default:
734         xive2_error(xive, "IC: unsupported table %d", tsel);
735         return -1;
736     }
737 
738     if (xive->cq_regs[CQ_TAR >> 3] & CQ_TAR_AUTOINC) {
739         xive->cq_regs[CQ_TAR >> 3] = SETFIELD(CQ_TAR_ENTRY_SELECT,
740                      xive->cq_regs[CQ_TAR >> 3], ++entry);
741     }
742 
743     return 0;
744 }
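/*
 * Typical firmware usage (sketch): select a table and starting entry
 * via CQ_TAR with CQ_TAR_AUTOINC set, then write successive values to
 * CQ_TDR; each write lands in the next entry of xive->tables[tsel].
 */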
745 /*
746  * Virtual Structure Tables (VST) configuration
747  */
748 static void pnv_xive2_vst_set_exclusive(PnvXive2 *xive, uint8_t type,
749                                         uint8_t blk, uint64_t vsd)
750 {
751     Xive2EndSource *end_xsrc = &xive->end_source;
752     XiveSource *xsrc = &xive->ipi_source;
753     const XiveVstInfo *info = &vst_infos[type];
754     uint32_t page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
755     uint64_t vst_tsize = 1ull << page_shift;
756     uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
757 
758     /* Basic checks */
759 
760     if (VSD_INDIRECT & vsd) {
761         if (!pnv_xive2_vst_page_size_allowed(page_shift)) {
762             xive2_error(xive, "VST: invalid %s page shift %d", info->name,
763                        page_shift);
764             return;
765         }
766     }
767 
768     if (!QEMU_IS_ALIGNED(vst_addr, 1ull << page_shift)) {
769         xive2_error(xive, "VST: %s table address 0x%"PRIx64
770                     " is not aligned with page shift %d",
771                     info->name, vst_addr, page_shift);
772         return;
773     }
774 
775     /* Record the table configuration (in SRAM on HW) */
776     xive->vsds[type][blk] = vsd;
777 
778     /* Now tune the models with the configuration provided by the FW */
779 
780     switch (type) {
781     case VST_ESB:
782         /*
783          * Backing store pages for the source PQ bits. The model does
784          * not use these PQ bits backed in RAM because the XiveSource
785          * model has its own.
786          *
787          * If the table is direct, we can compute the number of PQ
788          * entries provisioned by FW (such as skiboot) and resize the
789          * ESB window accordingly.
790          */
791         if (memory_region_is_mapped(&xsrc->esb_mmio)) {
792             memory_region_del_subregion(&xive->esb_mmio, &xsrc->esb_mmio);
793         }
794         if (!(VSD_INDIRECT & vsd)) {
795             memory_region_set_size(&xsrc->esb_mmio, vst_tsize * SBE_PER_BYTE
796                                    * (1ull << xsrc->esb_shift));
797         }
798 
799         memory_region_add_subregion(&xive->esb_mmio, 0, &xsrc->esb_mmio);
800         break;
801 
802     case VST_EAS:  /* Nothing to be done */
803         break;
804 
805     case VST_END:
806         /*
807          * Backing store pages for the END.
808          */
809         if (memory_region_is_mapped(&end_xsrc->esb_mmio)) {
810             memory_region_del_subregion(&xive->end_mmio, &end_xsrc->esb_mmio);
811         }
812         if (!(VSD_INDIRECT & vsd)) {
813             memory_region_set_size(&end_xsrc->esb_mmio, (vst_tsize / info->size)
814                                    * (1ull << end_xsrc->esb_shift));
815         }
816         memory_region_add_subregion(&xive->end_mmio, 0, &end_xsrc->esb_mmio);
817         break;
818 
819     case VST_NVP:  /* Not modeled */
820     case VST_NVG:  /* Not modeled */
821     case VST_NVC:  /* Not modeled */
822     case VST_IC:   /* Not modeled */
823     case VST_SYNC: /* Not modeled */
824     case VST_ERQ:  /* Not modeled */
825         break;
826 
827     default:
828         g_assert_not_reached();
829     }
830 }
831 
832 /*
833  * Both the PC and VC sub-engines are configured here, as each uses the
834  * Virtual Structure Tables.
835  */
836 static void pnv_xive2_vst_set_data(PnvXive2 *xive, uint64_t vsd,
837                                    uint8_t type, uint8_t blk)
838 {
839     uint8_t mode = GETFIELD(VSD_MODE, vsd);
840     uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
841 
842     if (type > VST_ERQ) {
843         xive2_error(xive, "VST: invalid table type %d", type);
844         return;
845     }
846 
847     if (blk >= vst_infos[type].max_blocks) {
848         xive2_error(xive, "VST: invalid block id %d for"
849                       " %s table", blk, vst_infos[type].name);
850         return;
851     }
852 
853     if (!vst_addr) {
854         xive2_error(xive, "VST: invalid %s table address",
855                    vst_infos[type].name);
856         return;
857     }
858 
859     switch (mode) {
860     case VSD_MODE_FORWARD:
861         xive->vsds[type][blk] = vsd;
862         break;
863 
864     case VSD_MODE_EXCLUSIVE:
865         pnv_xive2_vst_set_exclusive(xive, type, blk, vsd);
866         break;
867 
868     default:
869         xive2_error(xive, "VST: unsupported table mode %d", mode);
870         return;
871     }
872 }
873 
874 static void pnv_xive2_vc_vst_set_data(PnvXive2 *xive, uint64_t vsd)
875 {
876     uint8_t type = GETFIELD(VC_VSD_TABLE_SELECT,
877                             xive->vc_regs[VC_VSD_TABLE_ADDR >> 3]);
878     uint8_t blk = GETFIELD(VC_VSD_TABLE_ADDRESS,
879                            xive->vc_regs[VC_VSD_TABLE_ADDR >> 3]);
880 
881     pnv_xive2_vst_set_data(xive, vsd, type, blk);
882 }
883 
884 /*
885  * MMIO handlers
886  */
887 
888 
889 /*
890  * IC BAR layout
891  *
892  * Page 0: Internal CQ register accesses (reads & writes)
893  * Page 1: Internal PC register accesses (reads & writes)
894  * Page 2: Internal VC register accesses (reads & writes)
895  * Page 3: Internal TCTXT (TIMA) reg accesses (read & writes)
896  * Page 4: Notify Port page (writes only, w/data),
897  * Page 5: Reserved
898  * Page 6: Sync Poll page (writes only, dataless)
899  * Page 7: Sync Inject page (writes only, dataless)
900  * Page 8: LSI Trigger page (writes only, dataless)
901  * Page 9: LSI SB Management page (reads & writes dataless)
902  * Pages 10-255: Reserved
903  * Pages 256-383: Direct mapped Thread Context Area (reads & writes)
904  *                covering the 128 threads in P10.
905  * Pages 384-511: Reserved
906  */
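/*
 * For example (illustrative): with CQ_IC_BAR_64K set (ic_shift = 16),
 * the Notify Port page (page 4) is mapped at ic_base + (4 << 16) and
 * the direct TIMA pages start at ic_base + (256 << 16), matching
 * pnv_xive2_ic_regions[] below.
 */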
907 typedef struct PnvXive2Region {
908     const char *name;
909     uint32_t pgoff;
910     uint32_t pgsize;
911     const MemoryRegionOps *ops;
912 } PnvXive2Region;
913 
914 static const MemoryRegionOps pnv_xive2_ic_cq_ops;
915 static const MemoryRegionOps pnv_xive2_ic_pc_ops;
916 static const MemoryRegionOps pnv_xive2_ic_vc_ops;
917 static const MemoryRegionOps pnv_xive2_ic_tctxt_ops;
918 static const MemoryRegionOps pnv_xive2_ic_notify_ops;
919 static const MemoryRegionOps pnv_xive2_ic_sync_ops;
920 static const MemoryRegionOps pnv_xive2_ic_lsi_ops;
921 static const MemoryRegionOps pnv_xive2_ic_tm_indirect_ops;
922 
923 /* 512 pages. 4K: 2M range, 64K: 32M range */
924 static const PnvXive2Region pnv_xive2_ic_regions[] = {
925     { "xive-ic-cq",        0,   1,   &pnv_xive2_ic_cq_ops     },
926     { "xive-ic-vc",        1,   1,   &pnv_xive2_ic_vc_ops     },
927     { "xive-ic-pc",        2,   1,   &pnv_xive2_ic_pc_ops     },
928     { "xive-ic-tctxt",     3,   1,   &pnv_xive2_ic_tctxt_ops  },
929     { "xive-ic-notify",    4,   1,   &pnv_xive2_ic_notify_ops },
930     /* page 5 reserved */
931     { "xive-ic-sync",      6,   2,   &pnv_xive2_ic_sync_ops   },
932     { "xive-ic-lsi",       8,   2,   &pnv_xive2_ic_lsi_ops    },
933     /* pages 10-255 reserved */
934     { "xive-ic-tm-indirect", 256, 128, &pnv_xive2_ic_tm_indirect_ops  },
935     /* pages 384-511 reserved */
936 };
937 
938 /*
939  * CQ operations
940  */
941 
942 static uint64_t pnv_xive2_ic_cq_read(void *opaque, hwaddr offset,
943                                         unsigned size)
944 {
945     PnvXive2 *xive = PNV_XIVE2(opaque);
946     uint32_t reg = offset >> 3;
947     uint64_t val = 0;
948 
949     switch (offset) {
950     case CQ_XIVE_CAP: /* Set at reset */
951     case CQ_XIVE_CFG:
952         val = xive->cq_regs[reg];
953         break;
954     case CQ_MSGSND: /* TODO check the #cores of the machine */
955         val = 0xffffffff00000000;
956         break;
957     case CQ_CFG_PB_GEN:
958         val = CQ_CFG_PB_GEN_PB_INIT; /* TODO: fix CQ_CFG_PB_GEN default value */
959         break;
960     default:
961         xive2_error(xive, "CQ: invalid read @%"HWADDR_PRIx, offset);
962     }
963 
964     return val;
965 }
966 
967 static uint64_t pnv_xive2_bar_size(uint64_t val)
968 {
969     return 1ull << (GETFIELD(CQ_BAR_RANGE, val) + 24);
970 }
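/* e.g. CQ_BAR_RANGE = 0 gives a 16MB BAR, 1 gives 32MB, and so on */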
971 
972 static void pnv_xive2_ic_cq_write(void *opaque, hwaddr offset,
973                                   uint64_t val, unsigned size)
974 {
975     PnvXive2 *xive = PNV_XIVE2(opaque);
976     MemoryRegion *sysmem = get_system_memory();
977     uint32_t reg = offset >> 3;
978     int i;
979 
980     switch (offset) {
981     case CQ_XIVE_CFG:
982     case CQ_RST_CTL: /* TODO: reset all BARs */
983         break;
984 
985     case CQ_IC_BAR:
986         xive->ic_shift = val & CQ_IC_BAR_64K ? 16 : 12;
987         if (!(val & CQ_IC_BAR_VALID)) {
988             xive->ic_base = 0;
989             if (xive->cq_regs[reg] & CQ_IC_BAR_VALID) {
990                 for (i = 0; i < ARRAY_SIZE(xive->ic_mmios); i++) {
991                     memory_region_del_subregion(&xive->ic_mmio,
992                                                 &xive->ic_mmios[i]);
993                 }
994                 memory_region_del_subregion(sysmem, &xive->ic_mmio);
995             }
996         } else {
997             xive->ic_base = val & ~(CQ_IC_BAR_VALID | CQ_IC_BAR_64K);
998             if (!(xive->cq_regs[reg] & CQ_IC_BAR_VALID)) {
999                 for (i = 0; i < ARRAY_SIZE(xive->ic_mmios); i++) {
1000                     memory_region_add_subregion(&xive->ic_mmio,
1001                                pnv_xive2_ic_regions[i].pgoff << xive->ic_shift,
1002                                &xive->ic_mmios[i]);
1003                 }
1004                 memory_region_add_subregion(sysmem, xive->ic_base,
1005                                             &xive->ic_mmio);
1006             }
1007         }
1008         break;
1009 
1010     case CQ_TM_BAR:
1011         xive->tm_shift = val & CQ_TM_BAR_64K ? 16 : 12;
1012         if (!(val & CQ_TM_BAR_VALID)) {
1013             xive->tm_base = 0;
1014             if (xive->cq_regs[reg] & CQ_TM_BAR_VALID) {
1015                 memory_region_del_subregion(sysmem, &xive->tm_mmio);
1016             }
1017         } else {
1018             xive->tm_base = val & ~(CQ_TM_BAR_VALID | CQ_TM_BAR_64K);
1019             if (!(xive->cq_regs[reg] & CQ_TM_BAR_VALID)) {
1020                 memory_region_add_subregion(sysmem, xive->tm_base,
1021                                             &xive->tm_mmio);
1022             }
1023         }
1024         break;
1025 
1026     case CQ_ESB_BAR:
1027         xive->esb_shift = val & CQ_BAR_64K ? 16 : 12;
1028         if (!(val & CQ_BAR_VALID)) {
1029             xive->esb_base = 0;
1030             if (xive->cq_regs[reg] & CQ_BAR_VALID) {
1031                 memory_region_del_subregion(sysmem, &xive->esb_mmio);
1032             }
1033         } else {
1034             xive->esb_base = val & CQ_BAR_ADDR;
1035             if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
1036                 memory_region_set_size(&xive->esb_mmio,
1037                                        pnv_xive2_bar_size(val));
1038                 memory_region_add_subregion(sysmem, xive->esb_base,
1039                                             &xive->esb_mmio);
1040             }
1041         }
1042         break;
1043 
1044     case CQ_END_BAR:
1045         xive->end_shift = val & CQ_BAR_64K ? 16 : 12;
1046         if (!(val & CQ_BAR_VALID)) {
1047             xive->end_base = 0;
1048             if (xive->cq_regs[reg] & CQ_BAR_VALID) {
1049                 memory_region_del_subregion(sysmem, &xive->end_mmio);
1050             }
1051         } else {
1052             xive->end_base = val & CQ_BAR_ADDR;
1053             if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
1054                 memory_region_set_size(&xive->end_mmio,
1055                                        pnv_xive2_bar_size(val));
1056                 memory_region_add_subregion(sysmem, xive->end_base,
1057                                             &xive->end_mmio);
1058             }
1059         }
1060         break;
1061 
1062     case CQ_NVC_BAR:
1063         xive->nvc_shift = val & CQ_BAR_64K ? 16 : 12;
1064         if (!(val & CQ_BAR_VALID)) {
1065             xive->nvc_base = 0;
1066             if (xive->cq_regs[reg] & CQ_BAR_VALID) {
1067                 memory_region_del_subregion(sysmem, &xive->nvc_mmio);
1068             }
1069         } else {
1070             xive->nvc_base = val & CQ_BAR_ADDR;
1071             if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
1072                 memory_region_set_size(&xive->nvc_mmio,
1073                                        pnv_xive2_bar_size(val));
1074                 memory_region_add_subregion(sysmem, xive->nvc_base,
1075                                             &xive->nvc_mmio);
1076             }
1077         }
1078         break;
1079 
1080     case CQ_NVPG_BAR:
1081         xive->nvpg_shift = val & CQ_BAR_64K ? 16 : 12;
1082         if (!(val & CQ_BAR_VALID)) {
1083             xive->nvpg_base = 0;
1084             if (xive->cq_regs[reg] & CQ_BAR_VALID) {
1085                 memory_region_del_subregion(sysmem, &xive->nvpg_mmio);
1086             }
1087         } else {
1088             xive->nvpg_base = val & CQ_BAR_ADDR;
1089             if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
1090                 memory_region_set_size(&xive->nvpg_mmio,
1091                                        pnv_xive2_bar_size(val));
1092                 memory_region_add_subregion(sysmem, xive->nvpg_base,
1093                                             &xive->nvpg_mmio);
1094             }
1095         }
1096         break;
1097 
1098     case CQ_TAR: /* Set Translation Table Address */
1099         break;
1100     case CQ_TDR: /* Set Translation Table Data */
1101         pnv_xive2_stt_set_data(xive, val);
1102         break;
1103     case CQ_FIRMASK_OR: /* FIR error reporting */
1104         break;
1105     default:
1106         xive2_error(xive, "CQ: invalid write 0x%"HWADDR_PRIx, offset);
1107         return;
1108     }
1109 
1110     xive->cq_regs[reg] = val;
1111 }
1112 
1113 static const MemoryRegionOps pnv_xive2_ic_cq_ops = {
1114     .read = pnv_xive2_ic_cq_read,
1115     .write = pnv_xive2_ic_cq_write,
1116     .endianness = DEVICE_BIG_ENDIAN,
1117     .valid = {
1118         .min_access_size = 8,
1119         .max_access_size = 8,
1120     },
1121     .impl = {
1122         .min_access_size = 8,
1123         .max_access_size = 8,
1124     },
1125 };
1126 
1127 static uint8_t pnv_xive2_cache_watch_assign(uint64_t engine_mask,
1128                                             uint64_t *state)
1129 {
1130     uint8_t val = 0xFF;
1131     int i;
1132 
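    /*
     * Engines are tracked MSB-first in the state mask: watch engine n
     * maps to BIT(3 - n), so a free engine found at BIT(3) hands out
     * engine number 0.
     */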
1133     for (i = 3; i >= 0; i--) {
1134         if (BIT(i) & engine_mask) {
1135             if (!(BIT(i) & *state)) {
1136                 *state |= BIT(i);
1137                 val = 3 - i;
1138                 break;
1139             }
1140         }
1141     }
1142     return val;
1143 }
1144 
1145 static void pnv_xive2_cache_watch_release(uint64_t *state, uint8_t watch_engine)
1146 {
1147     uint8_t engine_bit = 3 - watch_engine;
1148 
1149     if (*state & BIT(engine_bit)) {
1150         *state &= ~BIT(engine_bit);
1151     }
1152 }
1153 
1154 static uint8_t pnv_xive2_endc_cache_watch_assign(PnvXive2 *xive)
1155 {
1156     uint64_t engine_mask = GETFIELD(VC_ENDC_CFG_CACHE_WATCH_ASSIGN,
1157                                     xive->vc_regs[VC_ENDC_CFG >> 3]);
1158     uint64_t state = xive->vc_regs[VC_ENDC_WATCH_ASSIGN >> 3];
1159     uint8_t val;
1160 
1161     /*
1162      * We keep track of which engines are currently busy in the
1163      * VC_ENDC_WATCH_ASSIGN register directly. When the firmware reads
1164      * the register, we don't return its value but the ID of an engine
1165      * it can use.
1166      * There are 4 engines. 0xFF means no engine is available.
1167      */
1168     val = pnv_xive2_cache_watch_assign(engine_mask, &state);
1169     if (val != 0xFF) {
1170         xive->vc_regs[VC_ENDC_WATCH_ASSIGN >> 3] = state;
1171     }
1172     return val;
1173 }
1174 
1175 static void pnv_xive2_endc_cache_watch_release(PnvXive2 *xive,
1176                                                uint8_t watch_engine)
1177 {
1178     uint64_t state = xive->vc_regs[VC_ENDC_WATCH_ASSIGN >> 3];
1179 
1180     pnv_xive2_cache_watch_release(&state, watch_engine);
1181     xive->vc_regs[VC_ENDC_WATCH_ASSIGN >> 3] = state;
1182 }
1183 
1184 static uint64_t pnv_xive2_ic_vc_read(void *opaque, hwaddr offset,
1185                                      unsigned size)
1186 {
1187     PnvXive2 *xive = PNV_XIVE2(opaque);
1188     uint64_t val = 0;
1189     uint32_t reg = offset >> 3;
1190     uint8_t watch_engine;
1191 
1192     switch (offset) {
1193     /*
1194      * VSD table settings.
1195      */
1196     case VC_VSD_TABLE_ADDR:
1197     case VC_VSD_TABLE_DATA:
1198         val = xive->vc_regs[reg];
1199         break;
1200 
1201     /*
1202      * ESB cache updates (not modeled)
1203      */
1204     case VC_ESBC_FLUSH_CTRL:
1205         xive->vc_regs[reg] &= ~VC_ESBC_FLUSH_CTRL_POLL_VALID;
1206         val = xive->vc_regs[reg];
1207         break;
1208 
1209     case VC_ESBC_CFG:
1210         val = xive->vc_regs[reg];
1211         break;
1212 
1213     /*
1214      * EAS cache updates (not modeled)
1215      */
1216     case VC_EASC_FLUSH_CTRL:
1217         xive->vc_regs[reg] &= ~VC_EASC_FLUSH_CTRL_POLL_VALID;
1218         val = xive->vc_regs[reg];
1219         break;
1220 
1221     case VC_ENDC_WATCH_ASSIGN:
1222         val = pnv_xive2_endc_cache_watch_assign(xive);
1223         break;
1224 
1225     case VC_ENDC_CFG:
1226         val = xive->vc_regs[reg];
1227         break;
1228 
1229     /*
1230      * END cache updates
1231      */
1232     case VC_ENDC_WATCH0_SPEC:
1233     case VC_ENDC_WATCH1_SPEC:
1234     case VC_ENDC_WATCH2_SPEC:
1235     case VC_ENDC_WATCH3_SPEC:
1236         watch_engine = (offset - VC_ENDC_WATCH0_SPEC) >> 6;
1237         xive->vc_regs[reg] &= ~(VC_ENDC_WATCH_FULL | VC_ENDC_WATCH_CONFLICT);
1238         pnv_xive2_endc_cache_watch_release(xive, watch_engine);
1239         val = xive->vc_regs[reg];
1240         break;
1241 
1242     case VC_ENDC_WATCH0_DATA0:
1243     case VC_ENDC_WATCH1_DATA0:
1244     case VC_ENDC_WATCH2_DATA0:
1245     case VC_ENDC_WATCH3_DATA0:
1246         /*
1247          * Load DATA registers from cache with data requested by the
1248          * SPEC register
1249          */
1250         watch_engine = (offset - VC_ENDC_WATCH0_DATA0) >> 6;
1251         pnv_xive2_end_cache_load(xive, watch_engine);
1252         val = xive->vc_regs[reg];
1253         break;
1254 
1255     case VC_ENDC_WATCH0_DATA1 ... VC_ENDC_WATCH0_DATA3:
1256     case VC_ENDC_WATCH1_DATA1 ... VC_ENDC_WATCH1_DATA3:
1257     case VC_ENDC_WATCH2_DATA1 ... VC_ENDC_WATCH2_DATA3:
1258     case VC_ENDC_WATCH3_DATA1 ... VC_ENDC_WATCH3_DATA3:
1259         val = xive->vc_regs[reg];
1260         break;
1261 
1262     case VC_ENDC_FLUSH_CTRL:
1263         xive->vc_regs[reg] &= ~VC_ENDC_FLUSH_CTRL_POLL_VALID;
1264         val = xive->vc_regs[reg];
1265         break;
1266 
1267     /*
1268      * Indirect invalidation
1269      */
1270     case VC_AT_MACRO_KILL_MASK:
1271         val = xive->vc_regs[reg];
1272         break;
1273 
1274     case VC_AT_MACRO_KILL:
1275         xive->vc_regs[reg] &= ~VC_AT_MACRO_KILL_VALID;
1276         val = xive->vc_regs[reg];
1277         break;
1278 
1279     /*
1280      * Interrupt fifo overflow in memory backing store (Not modeled)
1281      */
1282     case VC_QUEUES_CFG_REM0 ... VC_QUEUES_CFG_REM6:
1283         val = xive->vc_regs[reg];
1284         break;
1285 
1286     /*
1287      * Synchronisation
1288      */
1289     case VC_ENDC_SYNC_DONE:
1290         val = VC_ENDC_SYNC_POLL_DONE;
1291         break;
1292     default:
1293         xive2_error(xive, "VC: invalid read @%"HWADDR_PRIx, offset);
1294     }
1295 
1296     return val;
1297 }
1298 
1299 static void pnv_xive2_ic_vc_write(void *opaque, hwaddr offset,
1300                                   uint64_t val, unsigned size)
1301 {
1302     PnvXive2 *xive = PNV_XIVE2(opaque);
1303     uint32_t reg = offset >> 3;
1304     uint8_t watch_engine;
1305 
1306     switch (offset) {
1307     /*
1308      * VSD table settings.
1309      */
1310     case VC_VSD_TABLE_ADDR:
1311         break;
1312     case VC_VSD_TABLE_DATA:
1313         pnv_xive2_vc_vst_set_data(xive, val);
1314         break;
1315 
1316     /*
1317      * ESB cache updates (not modeled)
1318      */
1319     /* case VC_ESBC_FLUSH_CTRL: */
1320     case VC_ESBC_FLUSH_POLL:
1321         xive->vc_regs[VC_ESBC_FLUSH_CTRL >> 3] |= VC_ESBC_FLUSH_CTRL_POLL_VALID;
1322         /* ESB update */
1323         break;
1324 
1325     case VC_ESBC_FLUSH_INJECT:
1326         pnv_xive2_inject_notify(xive, PNV_XIVE2_CACHE_ESBC);
1327         break;
1328 
1329     case VC_ESBC_CFG:
1330         break;
1331 
1332     /*
1333      * EAS cache updates (not modeled)
1334      */
1335     /* case VC_EASC_FLUSH_CTRL: */
1336     case VC_EASC_FLUSH_POLL:
1337         xive->vc_regs[VC_EASC_FLUSH_CTRL >> 3] |= VC_EASC_FLUSH_CTRL_POLL_VALID;
1338         /* EAS update */
1339         break;
1340 
1341     case VC_EASC_FLUSH_INJECT:
1342         pnv_xive2_inject_notify(xive, PNV_XIVE2_CACHE_EASC);
1343         break;
1344 
1345     case VC_ENDC_CFG:
1346         break;
1347 
1348     /*
1349      * END cache updates
1350      */
1351     case VC_ENDC_WATCH0_SPEC:
1352     case VC_ENDC_WATCH1_SPEC:
1353     case VC_ENDC_WATCH2_SPEC:
1354     case VC_ENDC_WATCH3_SPEC:
1355         val &= ~VC_ENDC_WATCH_CONFLICT; /* HW will set this bit */
1356         break;
1357 
1358     case VC_ENDC_WATCH0_DATA1 ... VC_ENDC_WATCH0_DATA3:
1359     case VC_ENDC_WATCH1_DATA1 ... VC_ENDC_WATCH1_DATA3:
1360     case VC_ENDC_WATCH2_DATA1 ... VC_ENDC_WATCH2_DATA3:
1361     case VC_ENDC_WATCH3_DATA1 ... VC_ENDC_WATCH3_DATA3:
1362         break;
1363     case VC_ENDC_WATCH0_DATA0:
1364     case VC_ENDC_WATCH1_DATA0:
1365     case VC_ENDC_WATCH2_DATA0:
1366     case VC_ENDC_WATCH3_DATA0:
1367         /* writing to DATA0 triggers the cache write */
1368         watch_engine = (offset - VC_ENDC_WATCH0_DATA0) >> 6;
1369         xive->vc_regs[reg] = val;
1370         pnv_xive2_end_update(xive, watch_engine);
1371         break;
1372 
1373 
1374     /* case VC_ENDC_FLUSH_CTRL: */
1375     case VC_ENDC_FLUSH_POLL:
1376         xive->vc_regs[VC_ENDC_FLUSH_CTRL >> 3] |= VC_ENDC_FLUSH_CTRL_POLL_VALID;
1377         break;
1378 
1379     case VC_ENDC_FLUSH_INJECT:
1380         pnv_xive2_inject_notify(xive, PNV_XIVE2_CACHE_ENDC);
1381         break;
1382 
1383     /*
1384      * Indirect invalidation
1385      */
1386     case VC_AT_MACRO_KILL:
1387     case VC_AT_MACRO_KILL_MASK:
1388         break;
1389 
1390     /*
1391      * Interrupt fifo overflow in memory backing store (Not modeled)
1392      */
1393     case VC_QUEUES_CFG_REM0 ... VC_QUEUES_CFG_REM6:
1394         break;
1395 
1396     /*
1397      * Synchronisation
1398      */
1399     case VC_ENDC_SYNC_DONE:
1400         break;
1401 
1402     default:
1403         xive2_error(xive, "VC: invalid write @%"HWADDR_PRIx, offset);
1404         return;
1405     }
1406 
1407     xive->vc_regs[reg] = val;
1408 }
1409 
1410 static const MemoryRegionOps pnv_xive2_ic_vc_ops = {
1411     .read = pnv_xive2_ic_vc_read,
1412     .write = pnv_xive2_ic_vc_write,
1413     .endianness = DEVICE_BIG_ENDIAN,
1414     .valid = {
1415         .min_access_size = 8,
1416         .max_access_size = 8,
1417     },
1418     .impl = {
1419         .min_access_size = 8,
1420         .max_access_size = 8,
1421     },
1422 };
1423 
1424 static uint8_t pnv_xive2_nxc_cache_watch_assign(PnvXive2 *xive)
1425 {
1426     uint64_t engine_mask = GETFIELD(PC_NXC_PROC_CONFIG_WATCH_ASSIGN,
1427                                     xive->pc_regs[PC_NXC_PROC_CONFIG >> 3]);
1428     uint64_t state = xive->pc_regs[PC_NXC_WATCH_ASSIGN >> 3];
1429     uint8_t val;
1430 
1431     /*
1432      * We keep track of which engines are currently busy in the
1433      * PC_NXC_WATCH_ASSIGN register directly. When the firmware reads
1434      * the register, we don't return its value but the ID of an engine
1435      * it can use.
1436      * There are 4 engines. 0xFF means no engine is available.
1437      */
1438     val = pnv_xive2_cache_watch_assign(engine_mask, &state);
1439     if (val != 0xFF) {
1440         xive->pc_regs[PC_NXC_WATCH_ASSIGN >> 3] = state;
1441     }
1442     return val;
1443 }
1444 
1445 static void pnv_xive2_nxc_cache_watch_release(PnvXive2 *xive,
1446                                               uint8_t watch_engine)
1447 {
1448     uint64_t state = xive->pc_regs[PC_NXC_WATCH_ASSIGN >> 3];
1449 
1450     pnv_xive2_cache_watch_release(&state, watch_engine);
1451     xive->pc_regs[PC_NXC_WATCH_ASSIGN >> 3] = state;
1452 }
1453 
1454 static uint64_t pnv_xive2_ic_pc_read(void *opaque, hwaddr offset,
1455                                      unsigned size)
1456 {
1457     PnvXive2 *xive = PNV_XIVE2(opaque);
1458     uint64_t val = -1;
1459     uint32_t reg = offset >> 3;
1460     uint8_t watch_engine;
1461 
1462     switch (offset) {
1463     /*
1464      * VSD table settings.
1465      */
1466     case PC_VSD_TABLE_ADDR:
1467     case PC_VSD_TABLE_DATA:
1468         val = xive->pc_regs[reg];
1469         break;
1470 
1471     case PC_NXC_WATCH_ASSIGN:
1472         val = pnv_xive2_nxc_cache_watch_assign(xive);
1473         break;
1474 
1475     case PC_NXC_PROC_CONFIG:
1476         val = xive->pc_regs[reg];
1477         break;
1478 
1479     /*
1480      * cache updates
1481      */
1482     case PC_NXC_WATCH0_SPEC:
1483     case PC_NXC_WATCH1_SPEC:
1484     case PC_NXC_WATCH2_SPEC:
1485     case PC_NXC_WATCH3_SPEC:
1486         watch_engine = (offset - PC_NXC_WATCH0_SPEC) >> 6;
1487         xive->pc_regs[reg] &= ~(PC_NXC_WATCH_FULL | PC_NXC_WATCH_CONFLICT);
1488         pnv_xive2_nxc_cache_watch_release(xive, watch_engine);
1489         val = xive->pc_regs[reg];
1490         break;
1491 
1492     case PC_NXC_WATCH0_DATA0:
1493     case PC_NXC_WATCH1_DATA0:
1494     case PC_NXC_WATCH2_DATA0:
1495     case PC_NXC_WATCH3_DATA0:
1496         /*
1497          * Load DATA registers from cache with data requested by the
1498          * SPEC register
1499          */
1500         watch_engine = (offset - PC_NXC_WATCH0_DATA0) >> 6;
1501         pnv_xive2_nxc_cache_load(xive, watch_engine);
1502         val = xive->pc_regs[reg];
1503         break;
1504 
1505     case PC_NXC_WATCH0_DATA1 ... PC_NXC_WATCH0_DATA3:
1506     case PC_NXC_WATCH1_DATA1 ... PC_NXC_WATCH1_DATA3:
1507     case PC_NXC_WATCH2_DATA1 ... PC_NXC_WATCH2_DATA3:
1508     case PC_NXC_WATCH3_DATA1 ... PC_NXC_WATCH3_DATA3:
1509         val = xive->pc_regs[reg];
1510         break;
1511 
1512     case PC_NXC_FLUSH_CTRL:
1513         xive->pc_regs[reg] &= ~PC_NXC_FLUSH_CTRL_POLL_VALID;
1514         val = xive->pc_regs[reg];
1515         break;
1516 
1517     /*
1518      * Indirect invalidation
1519      */
1520     case PC_AT_KILL:
1521         xive->pc_regs[reg] &= ~PC_AT_KILL_VALID;
1522         val = xive->pc_regs[reg];
1523         break;
1524 
1525     default:
1526         xive2_error(xive, "PC: invalid read @%"HWADDR_PRIx, offset);
1527     }
1528 
1529     return val;
1530 }
1531 
1532 static void pnv_xive2_pc_vst_set_data(PnvXive2 *xive, uint64_t vsd)
1533 {
1534     uint8_t type = GETFIELD(PC_VSD_TABLE_SELECT,
1535                             xive->pc_regs[PC_VSD_TABLE_ADDR >> 3]);
1536     uint8_t blk = GETFIELD(PC_VSD_TABLE_ADDRESS,
1537                            xive->pc_regs[PC_VSD_TABLE_ADDR >> 3]);
1538 
1539     pnv_xive2_vst_set_data(xive, vsd, type, blk);
1540 }
1541 
1542 static void pnv_xive2_ic_pc_write(void *opaque, hwaddr offset,
1543                                   uint64_t val, unsigned size)
1544 {
1545     PnvXive2 *xive = PNV_XIVE2(opaque);
1546     uint32_t reg = offset >> 3;
1547     uint8_t watch_engine;
1548 
1549     switch (offset) {
1550 
1551     /*
1552      * VSD table settings.
1553      * The Xive2Router model combines both VC and PC sub-engines. We
1554      * allow the tables to be configured through either sub-engine, for
1555      * the rare cases where a table only really needs to be configured
1556      * for one of them (e.g. the NVG table for the presenter). This
1557      * assumes that firmware passes the same address to the VC and PC
1558      * when a table is defined for both, which seems acceptable.
1559      */
1560     case PC_VSD_TABLE_ADDR:
1561         break;
1562     case PC_VSD_TABLE_DATA:
1563         pnv_xive2_pc_vst_set_data(xive, val);
1564         break;
1565 
1566     case PC_NXC_PROC_CONFIG:
1567         break;
1568 
1569     /*
1570      * cache updates
1571      */
1572     case PC_NXC_WATCH0_SPEC:
1573     case PC_NXC_WATCH1_SPEC:
1574     case PC_NXC_WATCH2_SPEC:
1575     case PC_NXC_WATCH3_SPEC:
1576         val &= ~PC_NXC_WATCH_CONFLICT; /* HW will set this bit */
1577         break;
1578 
1579     case PC_NXC_WATCH0_DATA1 ... PC_NXC_WATCH0_DATA3:
1580     case PC_NXC_WATCH1_DATA1 ... PC_NXC_WATCH1_DATA3:
1581     case PC_NXC_WATCH2_DATA1 ... PC_NXC_WATCH2_DATA3:
1582     case PC_NXC_WATCH3_DATA1 ... PC_NXC_WATCH3_DATA3:
1583         break;
1584     case PC_NXC_WATCH0_DATA0:
1585     case PC_NXC_WATCH1_DATA0:
1586     case PC_NXC_WATCH2_DATA0:
1587     case PC_NXC_WATCH3_DATA0:
1588         /* writing to DATA0 triggers the cache write */
1589         watch_engine = (offset - PC_NXC_WATCH0_DATA0) >> 6;
1590         xive->pc_regs[reg] = val;
1591         pnv_xive2_nxc_update(xive, watch_engine);
1592         break;
1593 
1594     /* case PC_NXC_FLUSH_CTRL: */
1595     case PC_NXC_FLUSH_POLL:
1596         xive->pc_regs[PC_NXC_FLUSH_CTRL >> 3] |= PC_NXC_FLUSH_CTRL_POLL_VALID;
1597         break;
1598 
1599     case PC_NXC_FLUSH_INJECT:
1600         pnv_xive2_inject_notify(xive, PNV_XIVE2_CACHE_NXC);
1601         break;
1602 
1603     /*
1604      * Indirect invalidation
1605      */
1606     case PC_AT_KILL:
1607     case PC_AT_KILL_MASK:
1608         break;
1609 
1610     default:
1611         xive2_error(xive, "PC: invalid write @%"HWADDR_PRIx, offset);
1612         return;
1613     }
1614 
1615     xive->pc_regs[reg] = val;
1616 }
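
/*
 * Matching update sequence, again inferred from the switch above:
 * DATA1..DATA3 writes are merely latched into pc_regs[] and the DATA0
 * write is what commits the watch block to the NXC table.
 *
 *   write(PC_NXC_WATCHn_SPEC, spec);   // CONFLICT is stripped, HW-owned
 *   read(PC_NXC_WATCHn_DATA0);         // populate the watch from cache
 *   write(PC_NXC_WATCHn_DATA1, ...);   // stage the modified words
 *   write(PC_NXC_WATCHn_DATA0, ...);   // calls pnv_xive2_nxc_update()
 */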
1617 
1618 static const MemoryRegionOps pnv_xive2_ic_pc_ops = {
1619     .read = pnv_xive2_ic_pc_read,
1620     .write = pnv_xive2_ic_pc_write,
1621     .endianness = DEVICE_BIG_ENDIAN,
1622     .valid = {
1623         .min_access_size = 8,
1624         .max_access_size = 8,
1625     },
1626     .impl = {
1627         .min_access_size = 8,
1628         .max_access_size = 8,
1629     },
1630 };
1631 
1632 
1633 static uint64_t pnv_xive2_ic_tctxt_read(void *opaque, hwaddr offset,
1634                                         unsigned size)
1635 {
1636     PnvXive2 *xive = PNV_XIVE2(opaque);
1637     uint64_t val = -1;
1638     uint32_t reg = offset >> 3;
1639 
1640     switch (offset) {
1641     /*
1642      * XIVE2 hardware thread enablement
1643      */
1644     case TCTXT_EN0:
1645     case TCTXT_EN1:
1646         val = xive->tctxt_regs[reg];
1647         break;
1648 
1649     case TCTXT_EN0_SET:
1650     case TCTXT_EN0_RESET:
1651         val = xive->tctxt_regs[TCTXT_EN0 >> 3];
1652         break;
1653     case TCTXT_EN1_SET:
1654     case TCTXT_EN1_RESET:
1655         val = xive->tctxt_regs[TCTXT_EN1 >> 3];
1656         break;
1657     case TCTXT_CFG:
1658         val = xive->tctxt_regs[reg];
1659         break;
1660     default:
1661         xive2_error(xive, "TCTXT: invalid read @%"HWADDR_PRIx, offset);
1662     }
1663 
1664     return val;
1665 }
1666 
1667 static void pnv_xive2_ic_tctxt_write(void *opaque, hwaddr offset,
1668                                      uint64_t val, unsigned size)
1669 {
1670     PnvXive2 *xive = PNV_XIVE2(opaque);
1671     uint32_t reg = offset >> 3;
1672 
1673     switch (offset) {
1674     /*
1675      * XIVE2 hardware thread enablement
1676      */
1677     case TCTXT_EN0: /* Physical Thread Enable */
1678     case TCTXT_EN1: /* Physical Thread Enable (fused core) */
1679         xive->tctxt_regs[reg] = val;
1680         break;
1681 
1682     case TCTXT_EN0_SET:
1683         xive->tctxt_regs[TCTXT_EN0 >> 3] |= val;
1684         break;
1685     case TCTXT_EN1_SET:
1686         xive->tctxt_regs[TCTXT_EN1 >> 3] |= val;
1687         break;
1688     case TCTXT_EN0_RESET:
1689         xive->tctxt_regs[TCTXT_EN0 >> 3] &= ~val;
1690         break;
1691     case TCTXT_EN1_RESET:
1692         xive->tctxt_regs[TCTXT_EN1 >> 3] &= ~val;
1693         break;
1694     case TCTXT_CFG:
1695         xive->tctxt_regs[reg] = val;
1696         break;
1697     default:
1698         xive2_error(xive, "TCTXT: invalid write @%"HWADDR_PRIx, offset);
1699         return;
1700     }
1701 }
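
/*
 * Example usage of the enablement registers, assumed from the OR and
 * AND-NOT semantics above: enabling some threads and later disabling
 * them without disturbing the other enable bits.
 *
 *   write(TCTXT_EN0_SET,   mask);   // EN0 |=  mask
 *   write(TCTXT_EN0_RESET, mask);   // EN0 &= ~mask
 */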
1702 
1703 static const MemoryRegionOps pnv_xive2_ic_tctxt_ops = {
1704     .read = pnv_xive2_ic_tctxt_read,
1705     .write = pnv_xive2_ic_tctxt_write,
1706     .endianness = DEVICE_BIG_ENDIAN,
1707     .valid = {
1708         .min_access_size = 8,
1709         .max_access_size = 8,
1710     },
1711     .impl = {
1712         .min_access_size = 8,
1713         .max_access_size = 8,
1714     },
1715 };
1716 
1717 /*
1718  * Redirect XSCOM to MMIO handlers
1719  */
1720 static uint64_t pnv_xive2_xscom_read(void *opaque, hwaddr offset,
1721                                      unsigned size)
1722 {
1723     PnvXive2 *xive = PNV_XIVE2(opaque);
1724     uint64_t val = -1;
1725     uint32_t xscom_reg = offset >> 3;
1726     uint32_t mmio_offset = (xscom_reg & 0xFF) << 3;
1727 
1728     switch (xscom_reg) {
1729     case 0x000 ... 0x0FF:
1730         val = pnv_xive2_ic_cq_read(opaque, mmio_offset, size);
1731         break;
1732     case 0x100 ... 0x1FF:
1733         val = pnv_xive2_ic_vc_read(opaque, mmio_offset, size);
1734         break;
1735     case 0x200 ... 0x2FF:
1736         val = pnv_xive2_ic_pc_read(opaque, mmio_offset, size);
1737         break;
1738     case 0x300 ... 0x3FF:
1739         val = pnv_xive2_ic_tctxt_read(opaque, mmio_offset, size);
1740         break;
1741     default:
1742         xive2_error(xive, "XSCOM: invalid read @%"HWADDR_PRIx, offset);
1743     }
1744 
1745     return val;
1746 }
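
/*
 * Worked example of the decode above, with illustrative values: an
 * XSCOM access at byte offset 0x1090 gives xscom_reg = 0x1090 >> 3 =
 * 0x212, which falls in the 0x200 ... 0x2FF PC range, and mmio_offset =
 * (0x212 & 0xFF) << 3 = 0x90 within the PC register page.
 */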
1747 
1748 static void pnv_xive2_xscom_write(void *opaque, hwaddr offset,
1749                                   uint64_t val, unsigned size)
1750 {
1751     PnvXive2 *xive = PNV_XIVE2(opaque);
1752     uint32_t xscom_reg = offset >> 3;
1753     uint32_t mmio_offset = (xscom_reg & 0xFF) << 3;
1754 
1755     switch (xscom_reg) {
1756     case 0x000 ... 0x0FF:
1757         pnv_xive2_ic_cq_write(opaque, mmio_offset, val, size);
1758         break;
1759     case 0x100 ... 0x1FF:
1760         pnv_xive2_ic_vc_write(opaque, mmio_offset, val, size);
1761         break;
1762     case 0x200 ... 0x2FF:
1763         pnv_xive2_ic_pc_write(opaque, mmio_offset, val, size);
1764         break;
1765     case 0x300 ... 0x3FF:
1766         pnv_xive2_ic_tctxt_write(opaque, mmio_offset, val, size);
1767         break;
1768     default:
1769         xive2_error(xive, "XSCOM: invalid write @%"HWADDR_PRIx, offset);
1770     }
1771 }
1772 
1773 static const MemoryRegionOps pnv_xive2_xscom_ops = {
1774     .read = pnv_xive2_xscom_read,
1775     .write = pnv_xive2_xscom_write,
1776     .endianness = DEVICE_BIG_ENDIAN,
1777     .valid = {
1778         .min_access_size = 8,
1779         .max_access_size = 8,
1780     },
1781     .impl = {
1782         .min_access_size = 8,
1783         .max_access_size = 8,
1784     },
1785 };
1786 
1787 /*
1788  * Notify port page. The layout is compatible between 4K and 64K pages:
1789  *
1790  * Page 1           Notify page (writes only)
1791  *  0x000 - 0x7FF   IPI interrupt (NPU)
1792  *  0x800 - 0xFFF   HW interrupt triggers (PSI, PHB)
1793  */
1794 
1795 static void pnv_xive2_ic_hw_trigger(PnvXive2 *xive, hwaddr addr,
1796                                     uint64_t val)
1797 {
1798     uint8_t blk;
1799     uint32_t idx;
1800 
1801     if (val & XIVE_TRIGGER_END) {
1802         xive2_error(xive, "IC: END trigger at @0x%"HWADDR_PRIx" data 0x%"PRIx64,
1803                    addr, val);
1804         return;
1805     }
1806 
1807     /*
1808      * Forward the source event notification directly to the Router.
1809      * The source interrupt number should already be correctly encoded
1810      * with the chip block id by the sending device (PHB, PSI).
1811      */
1812     blk = XIVE_EAS_BLOCK(val);
1813     idx = XIVE_EAS_INDEX(val);
1814 
1815     xive2_router_notify(XIVE_NOTIFIER(xive), XIVE_EAS(blk, idx),
1816                          !!(val & XIVE_TRIGGER_PQ));
1817 }
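
/*
 * Example trigger data, for illustration only: a device on block 2
 * raising source 0x31 presents a value with XIVE_EAS_BLOCK(val) == 2
 * and XIVE_EAS_INDEX(val) == 0x31. XIVE_TRIGGER_PQ set means the PQ
 * state was already checked at the source; XIVE_TRIGGER_END set marks
 * an END trigger, which this model rejects above.
 */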
1818 
1819 static void pnv_xive2_ic_notify_write(void *opaque, hwaddr offset,
1820                                       uint64_t val, unsigned size)
1821 {
1822     PnvXive2 *xive = PNV_XIVE2(opaque);
1823 
1824     /* VC: IPI triggers */
1825     switch (offset) {
1826     case 0x000 ... 0x7FF:
1827         /* TODO: check IPI notify sub-page routing */
1828         pnv_xive2_ic_hw_trigger(opaque, offset, val);
1829         break;
1830 
1831     /* VC: HW triggers */
1832     case 0x800 ... 0xFFF:
1833         pnv_xive2_ic_hw_trigger(opaque, offset, val);
1834         break;
1835 
1836     default:
1837         xive2_error(xive, "NOTIFY: invalid write @%"HWADDR_PRIx, offset);
1838     }
1839 }
1840 
1841 static uint64_t pnv_xive2_ic_notify_read(void *opaque, hwaddr offset,
1842                                          unsigned size)
1843 {
1844     PnvXive2 *xive = PNV_XIVE2(opaque);
1845 
1846     /* loads are invalid */
1847     xive2_error(xive, "NOTIFY: invalid read @%"HWADDR_PRIx, offset);
1848     return -1;
1849 }
1850 
1851 static const MemoryRegionOps pnv_xive2_ic_notify_ops = {
1852     .read = pnv_xive2_ic_notify_read,
1853     .write = pnv_xive2_ic_notify_write,
1854     .endianness = DEVICE_BIG_ENDIAN,
1855     .valid = {
1856         .min_access_size = 8,
1857         .max_access_size = 8,
1858     },
1859     .impl = {
1860         .min_access_size = 8,
1861         .max_access_size = 8,
1862     },
1863 };
1864 
1865 static uint64_t pnv_xive2_ic_lsi_read(void *opaque, hwaddr offset,
1866                                       unsigned size)
1867 {
1868     PnvXive2 *xive = PNV_XIVE2(opaque);
1869 
1870     xive2_error(xive, "LSI: invalid read @%"HWADDR_PRIx, offset);
1871     return -1;
1872 }
1873 
1874 static void pnv_xive2_ic_lsi_write(void *opaque, hwaddr offset,
1875                                    uint64_t val, unsigned size)
1876 {
1877     PnvXive2 *xive = PNV_XIVE2(opaque);
1878 
1879     xive2_error(xive, "LSI: invalid write @%"HWADDR_PRIx, offset);
1880 }
1881 
1882 static const MemoryRegionOps pnv_xive2_ic_lsi_ops = {
1883     .read = pnv_xive2_ic_lsi_read,
1884     .write = pnv_xive2_ic_lsi_write,
1885     .endianness = DEVICE_BIG_ENDIAN,
1886     .valid = {
1887         .min_access_size = 8,
1888         .max_access_size = 8,
1889     },
1890     .impl = {
1891         .min_access_size = 8,
1892         .max_access_size = 8,
1893     },
1894 };
1895 
1896 /*
1897  * Sync MMIO page (write only)
1898  */
1899 #define PNV_XIVE2_SYNC_IPI              0x000
1900 #define PNV_XIVE2_SYNC_HW               0x080
1901 #define PNV_XIVE2_SYNC_NxC              0x100
1902 #define PNV_XIVE2_SYNC_INT              0x180
1903 #define PNV_XIVE2_SYNC_OS_ESC           0x200
1904 #define PNV_XIVE2_SYNC_POOL_ESC         0x280
1905 #define PNV_XIVE2_SYNC_HARD_ESC         0x300
1906 #define PNV_XIVE2_SYNC_NXC_LD_LCL_NCO   0x800
1907 #define PNV_XIVE2_SYNC_NXC_LD_LCL_CO    0x880
1908 #define PNV_XIVE2_SYNC_NXC_ST_LCL_NCI   0x900
1909 #define PNV_XIVE2_SYNC_NXC_ST_LCL_CI    0x980
1910 #define PNV_XIVE2_SYNC_NXC_ST_RMT_NCI   0xA00
1911 #define PNV_XIVE2_SYNC_NXC_ST_RMT_CI    0xA80
1912 
1913 static uint64_t pnv_xive2_ic_sync_read(void *opaque, hwaddr offset,
1914                                        unsigned size)
1915 {
1916     PnvXive2 *xive = PNV_XIVE2(opaque);
1917 
1918     /* loads are invalid */
1919     xive2_error(xive, "SYNC: invalid read @%"HWADDR_PRIx, offset);
1920     return -1;
1921 }
1922 
1923 /*
1924  * The sync MMIO space spans two pages.  The lower page is used for
1925  * queue sync "poll" requests while the upper page is used for queue
1926  * sync "inject" requests.  Inject requests require the HW to write
1927  * a byte of all 1's to a predetermined location in memory in order
1928  * to signal completion of the request.  Both pages have the same
1929  * layout, so it is easiest to handle both with a single function.
1930  */
1931 static void pnv_xive2_ic_sync_write(void *opaque, hwaddr offset,
1932                                     uint64_t val, unsigned size)
1933 {
1934     PnvXive2 *xive = PNV_XIVE2(opaque);
1935     int inject_type;
1936     hwaddr pg_offset_mask = (1ull << xive->ic_shift) - 1;
1937 
1938     /* adjust offset for inject page */
1939     hwaddr adj_offset = offset & pg_offset_mask;
1940 
1941     switch (adj_offset) {
1942     case PNV_XIVE2_SYNC_IPI:
1943         inject_type = PNV_XIVE2_QUEUE_IPI;
1944         break;
1945     case PNV_XIVE2_SYNC_HW:
1946         inject_type = PNV_XIVE2_QUEUE_HW;
1947         break;
1948     case PNV_XIVE2_SYNC_NxC:
1949         inject_type = PNV_XIVE2_QUEUE_NXC;
1950         break;
1951     case PNV_XIVE2_SYNC_INT:
1952         inject_type = PNV_XIVE2_QUEUE_INT;
1953         break;
1954     case PNV_XIVE2_SYNC_OS_ESC:
1955         inject_type = PNV_XIVE2_QUEUE_OS;
1956         break;
1957     case PNV_XIVE2_SYNC_POOL_ESC:
1958         inject_type = PNV_XIVE2_QUEUE_POOL;
1959         break;
1960     case PNV_XIVE2_SYNC_HARD_ESC:
1961         inject_type = PNV_XIVE2_QUEUE_HARD;
1962         break;
1963     case PNV_XIVE2_SYNC_NXC_LD_LCL_NCO:
1964         inject_type = PNV_XIVE2_QUEUE_NXC_LD_LCL_NCO;
1965         break;
1966     case PNV_XIVE2_SYNC_NXC_LD_LCL_CO:
1967         inject_type = PNV_XIVE2_QUEUE_NXC_LD_LCL_CO;
1968         break;
1969     case PNV_XIVE2_SYNC_NXC_ST_LCL_NCI:
1970         inject_type = PNV_XIVE2_QUEUE_NXC_ST_LCL_NCI;
1971         break;
1972     case PNV_XIVE2_SYNC_NXC_ST_LCL_CI:
1973         inject_type = PNV_XIVE2_QUEUE_NXC_ST_LCL_CI;
1974         break;
1975     case PNV_XIVE2_SYNC_NXC_ST_RMT_NCI:
1976         inject_type = PNV_XIVE2_QUEUE_NXC_ST_RMT_NCI;
1977         break;
1978     case PNV_XIVE2_SYNC_NXC_ST_RMT_CI:
1979         inject_type = PNV_XIVE2_QUEUE_NXC_ST_RMT_CI;
1980         break;
1981     default:
1982         xive2_error(xive, "SYNC: invalid write @%"HWADDR_PRIx, offset);
1983         return;
1984     }
1985 
1986     /* Write Queue Sync notification byte if writing to sync inject page */
1987     if ((offset & ~pg_offset_mask) != 0) {
1988         pnv_xive2_inject_notify(xive, inject_type);
1989     }
1990 }
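
/*
 * Worked example, assuming the default 64K page size (ic_shift = 16):
 * a write at offset 0x10080 has adj_offset = 0x10080 & 0xFFFF = 0x080,
 * i.e. PNV_XIVE2_SYNC_HW, and since offset & ~0xFFFF is non-zero it
 * lands in the upper "inject" page, so pnv_xive2_inject_notify() runs
 * with PNV_XIVE2_QUEUE_HW. The same write at offset 0x080 (lower
 * "poll" page) only decodes the queue type.
 */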
1991 
1992 static const MemoryRegionOps pnv_xive2_ic_sync_ops = {
1993     .read = pnv_xive2_ic_sync_read,
1994     .write = pnv_xive2_ic_sync_write,
1995     .endianness = DEVICE_BIG_ENDIAN,
1996     .valid = {
1997         .min_access_size = 8,
1998         .max_access_size = 8,
1999     },
2000     .impl = {
2001         .min_access_size = 8,
2002         .max_access_size = 8,
2003     },
2004 };
2005 
2006 /*
2007  * When the TM direct pages of the IC controller are accessed, the
2008  * target HW thread is deduced from the page offset.
2009  */
2010 static uint32_t pnv_xive2_ic_tm_get_pir(PnvXive2 *xive, hwaddr offset)
2011 {
2012     /* On P10, the node ID shift in the PIR register is 8 bits */
2013     return xive->chip->chip_id << 8 | offset >> xive->ic_shift;
2014 }
2015 
2016 static uint32_t pnv_xive2_ic_tm_get_hw_page_offset(PnvXive2 *xive,
2017                                                    hwaddr offset)
2018 {
2019     /*
2020      * Indirect TIMA accesses are similar to direct accesses for
2021      * privilege ring 0. So remove any traces of the hw thread ID from
2022      * the offset in the IC BAR as it could be interpreted as the ring
2023      * privilege when calling the underlying direct access functions.
2024      */
2025     return offset & ((1ull << xive->ic_shift) - 1);
2026 }
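
/*
 * Worked example for the two helpers above, assuming ic_shift = 16:
 * an access at offset 0x52340 of the TM indirect window on chip 3
 * targets PIR (3 << 8) | (0x52340 >> 16) = 0x305, and the page offset
 * passed to the TIMA handlers is 0x52340 & 0xFFFF = 0x2340.
 */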
2027 
2028 static XiveTCTX *pnv_xive2_get_indirect_tctx(PnvXive2 *xive, uint32_t pir)
2029 {
2030     PnvChip *chip = xive->chip;
2031     PowerPCCPU *cpu = NULL;
2032 
2033     cpu = pnv_chip_find_cpu(chip, pir);
2034     if (!cpu) {
2035         xive2_error(xive, "IC: invalid PIR %x for indirect access", pir);
2036         return NULL;
2037     }
2038 
2039     if (!pnv_xive2_is_cpu_enabled(xive, cpu)) {
2040         xive2_error(xive, "IC: CPU %x is not enabled", pir);
2041     }
2042 
2043     return XIVE_TCTX(pnv_cpu_state(cpu)->intc);
2044 }
2045 
2046 static uint64_t pnv_xive2_ic_tm_indirect_read(void *opaque, hwaddr offset,
2047                                               unsigned size)
2048 {
2049     PnvXive2 *xive = PNV_XIVE2(opaque);
2050     XivePresenter *xptr = XIVE_PRESENTER(xive);
2051     hwaddr hw_page_offset;
2052     uint32_t pir;
2053     XiveTCTX *tctx;
2054     uint64_t val = -1;
2055 
2056     pir = pnv_xive2_ic_tm_get_pir(xive, offset);
2057     hw_page_offset = pnv_xive2_ic_tm_get_hw_page_offset(xive, offset);
2058     tctx = pnv_xive2_get_indirect_tctx(xive, pir);
2059     if (tctx) {
2060         val = xive_tctx_tm_read(xptr, tctx, hw_page_offset, size);
2061     }
2062 
2063     return val;
2064 }
2065 
2066 static void pnv_xive2_ic_tm_indirect_write(void *opaque, hwaddr offset,
2067                                            uint64_t val, unsigned size)
2068 {
2069     PnvXive2 *xive = PNV_XIVE2(opaque);
2070     XivePresenter *xptr = XIVE_PRESENTER(xive);
2071     hwaddr hw_page_offset;
2072     uint32_t pir;
2073     XiveTCTX *tctx;
2074 
2075     pir = pnv_xive2_ic_tm_get_pir(xive, offset);
2076     hw_page_offset = pnv_xive2_ic_tm_get_hw_page_offset(xive, offset);
2077     tctx = pnv_xive2_get_indirect_tctx(xive, pir);
2078     if (tctx) {
2079         xive_tctx_tm_write(xptr, tctx, hw_page_offset, val, size);
2080     }
2081 }
2082 
2083 static const MemoryRegionOps pnv_xive2_ic_tm_indirect_ops = {
2084     .read = pnv_xive2_ic_tm_indirect_read,
2085     .write = pnv_xive2_ic_tm_indirect_write,
2086     .endianness = DEVICE_BIG_ENDIAN,
2087     .valid = {
2088         .min_access_size = 1,
2089         .max_access_size = 8,
2090     },
2091     .impl = {
2092         .min_access_size = 1,
2093         .max_access_size = 8,
2094     },
2095 };
2096 
2097 /*
2098  * TIMA ops
2099  */
2100 static void pnv_xive2_tm_write(void *opaque, hwaddr offset,
2101                                uint64_t value, unsigned size)
2102 {
2103     PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
2104     PnvXive2 *xive = pnv_xive2_tm_get_xive(cpu);
2105     XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
2106     XivePresenter *xptr = XIVE_PRESENTER(xive);
2107 
2108     xive_tctx_tm_write(xptr, tctx, offset, value, size);
2109 }
2110 
2111 static uint64_t pnv_xive2_tm_read(void *opaque, hwaddr offset, unsigned size)
2112 {
2113     PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
2114     PnvXive2 *xive = pnv_xive2_tm_get_xive(cpu);
2115     XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
2116     XivePresenter *xptr = XIVE_PRESENTER(xive);
2117 
2118     return xive_tctx_tm_read(xptr, tctx, offset, size);
2119 }
2120 
2121 static const MemoryRegionOps pnv_xive2_tm_ops = {
2122     .read = pnv_xive2_tm_read,
2123     .write = pnv_xive2_tm_write,
2124     .endianness = DEVICE_BIG_ENDIAN,
2125     .valid = {
2126         .min_access_size = 1,
2127         .max_access_size = 8,
2128     },
2129     .impl = {
2130         .min_access_size = 1,
2131         .max_access_size = 8,
2132     },
2133 };
2134 
2135 static uint64_t pnv_xive2_nvc_read(void *opaque, hwaddr offset,
2136                                    unsigned size)
2137 {
2138     PnvXive2 *xive = PNV_XIVE2(opaque);
2139 
2140     xive2_error(xive, "NVC: invalid read @%"HWADDR_PRIx, offset);
2141     return -1;
2142 }
2143 
2144 static void pnv_xive2_nvc_write(void *opaque, hwaddr offset,
2145                                 uint64_t val, unsigned size)
2146 {
2147     PnvXive2 *xive = PNV_XIVE2(opaque);
2148 
2149     xive2_error(xive, "NVC: invalid write @%"HWADDR_PRIx, offset);
2150 }
2151 
2152 static const MemoryRegionOps pnv_xive2_nvc_ops = {
2153     .read = pnv_xive2_nvc_read,
2154     .write = pnv_xive2_nvc_write,
2155     .endianness = DEVICE_BIG_ENDIAN,
2156     .valid = {
2157         .min_access_size = 8,
2158         .max_access_size = 8,
2159     },
2160     .impl = {
2161         .min_access_size = 8,
2162         .max_access_size = 8,
2163     },
2164 };
2165 
2166 static uint64_t pnv_xive2_nvpg_read(void *opaque, hwaddr offset,
2167                                     unsigned size)
2168 {
2169     PnvXive2 *xive = PNV_XIVE2(opaque);
2170 
2171     xive2_error(xive, "NVPG: invalid read @%"HWADDR_PRIx, offset);
2172     return -1;
2173 }
2174 
2175 static void pnv_xive2_nvpg_write(void *opaque, hwaddr offset,
2176                                  uint64_t val, unsigned size)
2177 {
2178     PnvXive2 *xive = PNV_XIVE2(opaque);
2179 
2180     xive2_error(xive, "NVPG: invalid write @%"HWADDR_PRIx, offset);
2181 }
2182 
2183 static const MemoryRegionOps pnv_xive2_nvpg_ops = {
2184     .read = pnv_xive2_nvpg_read,
2185     .write = pnv_xive2_nvpg_write,
2186     .endianness = DEVICE_BIG_ENDIAN,
2187     .valid = {
2188         .min_access_size = 8,
2189         .max_access_size = 8,
2190     },
2191     .impl = {
2192         .min_access_size = 8,
2193         .max_access_size = 8,
2194     },
2195 };
2196 
2197 /*
2198  * POWER10 default capabilities: 0x2000120076f000FC
2199  */
2200 #define PNV_XIVE2_CAPABILITIES  0x2000120076f000FC
2201 
2202 /*
2203  * POWER10 default configuration: 0x0030000033000000
2204  *
2205  * The 8-bit thread id field was dropped for P10
2206  */
2207 #define PNV_XIVE2_CONFIGURATION 0x0030000033000000
2208 
2209 static void pnv_xive2_reset(void *dev)
2210 {
2211     PnvXive2 *xive = PNV_XIVE2(dev);
2212     XiveSource *xsrc = &xive->ipi_source;
2213     Xive2EndSource *end_xsrc = &xive->end_source;
2214 
2215     xive->cq_regs[CQ_XIVE_CAP >> 3] = xive->capabilities;
2216     xive->cq_regs[CQ_XIVE_CFG >> 3] = xive->config;
2217 
2218     /* HW hardwires the chip's topology ID in the block field */
2219     xive->cq_regs[CQ_XIVE_CFG >> 3] |=
2220         SETFIELD(CQ_XIVE_CFG_HYP_HARD_BLOCK_ID, 0ull, xive->chip->chip_id);
2221 
2222     /* VC and PC cache watch assign mechanism */
2223     xive->vc_regs[VC_ENDC_CFG >> 3] =
2224         SETFIELD(VC_ENDC_CFG_CACHE_WATCH_ASSIGN, 0ull, 0b0111);
2225     xive->pc_regs[PC_NXC_PROC_CONFIG >> 3] =
2226         SETFIELD(PC_NXC_PROC_CONFIG_WATCH_ASSIGN, 0ull, 0b0111);
2227 
2228     /* Set default page size to 64k */
2229     xive->ic_shift = xive->esb_shift = xive->end_shift = 16;
2230     xive->nvc_shift = xive->nvpg_shift = xive->tm_shift = 16;
2231 
2232     /* Clear source MMIOs */
2233     if (memory_region_is_mapped(&xsrc->esb_mmio)) {
2234         memory_region_del_subregion(&xive->esb_mmio, &xsrc->esb_mmio);
2235     }
2236 
2237     if (memory_region_is_mapped(&end_xsrc->esb_mmio)) {
2238         memory_region_del_subregion(&xive->end_mmio, &end_xsrc->esb_mmio);
2239     }
2240 }
2241 
2242 /*
2243  *  Maximum number of IRQs and ENDs supported by HW. Will be tuned by
2244  *  software.
2245  */
2246 #define PNV_XIVE2_NR_IRQS (PNV10_XIVE2_ESB_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
2247 #define PNV_XIVE2_NR_ENDS (PNV10_XIVE2_END_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
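
/*
 * Each interrupt needs a pair of 64K ESB pages (XIVE_ESB_64K_2PAGE),
 * i.e. 128K of MMIO, so the maximum count is the BAR size divided by
 * 128K. As an illustration with a hypothetical 1 GiB ESB window,
 * 0x40000000 >> 17 = 8192 interrupts.
 */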
2248 
2249 static void pnv_xive2_realize(DeviceState *dev, Error **errp)
2250 {
2251     PnvXive2 *xive = PNV_XIVE2(dev);
2252     PnvXive2Class *pxc = PNV_XIVE2_GET_CLASS(dev);
2253     XiveSource *xsrc = &xive->ipi_source;
2254     Xive2EndSource *end_xsrc = &xive->end_source;
2255     Error *local_err = NULL;
2256     int i;
2257 
2258     pxc->parent_realize(dev, &local_err);
2259     if (local_err) {
2260         error_propagate(errp, local_err);
2261         return;
2262     }
2263 
2264     assert(xive->chip);
2265 
2266     /*
2267      * The XiveSource and Xive2EndSource objects are realized with the
2268      * maximum allowed HW configuration. The ESB MMIO regions will be
2269      * resized dynamically when the controller is configured by the FW
2270      * to limit accesses to resources not provisioned.
2271      */
2272     object_property_set_int(OBJECT(xsrc), "flags", XIVE_SRC_STORE_EOI,
2273                             &error_fatal);
2274     object_property_set_int(OBJECT(xsrc), "nr-irqs", PNV_XIVE2_NR_IRQS,
2275                             &error_fatal);
2276     object_property_set_link(OBJECT(xsrc), "xive", OBJECT(xive),
2277                              &error_fatal);
2278     qdev_realize(DEVICE(xsrc), NULL, &local_err);
2279     if (local_err) {
2280         error_propagate(errp, local_err);
2281         return;
2282     }
2283 
2284     object_property_set_int(OBJECT(end_xsrc), "nr-ends", PNV_XIVE2_NR_ENDS,
2285                             &error_fatal);
2286     object_property_set_link(OBJECT(end_xsrc), "xive", OBJECT(xive),
2287                              &error_abort);
2288     qdev_realize(DEVICE(end_xsrc), NULL, &local_err);
2289     if (local_err) {
2290         error_propagate(errp, local_err);
2291         return;
2292     }
2293 
2294     /* XSCOM region, used for initial configuration of the BARs */
2295     memory_region_init_io(&xive->xscom_regs, OBJECT(dev),
2296                           &pnv_xive2_xscom_ops, xive, "xscom-xive",
2297                           PNV10_XSCOM_XIVE2_SIZE << 3);
2298 
2299     /* Interrupt controller MMIO regions */
2300     xive->ic_shift = 16;
2301     memory_region_init(&xive->ic_mmio, OBJECT(dev), "xive-ic",
2302                        PNV10_XIVE2_IC_SIZE);
2303 
2304     for (i = 0; i < ARRAY_SIZE(xive->ic_mmios); i++) {
2305         memory_region_init_io(&xive->ic_mmios[i], OBJECT(dev),
2306                          pnv_xive2_ic_regions[i].ops, xive,
2307                          pnv_xive2_ic_regions[i].name,
2308                          pnv_xive2_ic_regions[i].pgsize << xive->ic_shift);
2309     }
2310 
2311     /*
2312      * VC MMIO regions.
2313      */
2314     xive->esb_shift = 16;
2315     xive->end_shift = 16;
2316     memory_region_init(&xive->esb_mmio, OBJECT(xive), "xive-esb",
2317                        PNV10_XIVE2_ESB_SIZE);
2318     memory_region_init(&xive->end_mmio, OBJECT(xive), "xive-end",
2319                        PNV10_XIVE2_END_SIZE);
2320 
2321     /* Presenter Controller MMIO region (not modeled) */
2322     xive->nvc_shift = 16;
2323     xive->nvpg_shift = 16;
2324     memory_region_init_io(&xive->nvc_mmio, OBJECT(dev),
2325                           &pnv_xive2_nvc_ops, xive,
2326                           "xive-nvc", PNV10_XIVE2_NVC_SIZE);
2327 
2328     memory_region_init_io(&xive->nvpg_mmio, OBJECT(dev),
2329                           &pnv_xive2_nvpg_ops, xive,
2330                           "xive-nvpg", PNV10_XIVE2_NVPG_SIZE);
2331 
2332     /* Thread Interrupt Management Area (Direct) */
2333     xive->tm_shift = 16;
2334     memory_region_init_io(&xive->tm_mmio, OBJECT(dev), &pnv_xive2_tm_ops,
2335                           xive, "xive-tima", PNV10_XIVE2_TM_SIZE);
2336 
2337     qemu_register_reset(pnv_xive2_reset, dev);
2338 }
2339 
2340 static Property pnv_xive2_properties[] = {
2341     DEFINE_PROP_UINT64("ic-bar", PnvXive2, ic_base, 0),
2342     DEFINE_PROP_UINT64("esb-bar", PnvXive2, esb_base, 0),
2343     DEFINE_PROP_UINT64("end-bar", PnvXive2, end_base, 0),
2344     DEFINE_PROP_UINT64("nvc-bar", PnvXive2, nvc_base, 0),
2345     DEFINE_PROP_UINT64("nvpg-bar", PnvXive2, nvpg_base, 0),
2346     DEFINE_PROP_UINT64("tm-bar", PnvXive2, tm_base, 0),
2347     DEFINE_PROP_UINT64("capabilities", PnvXive2, capabilities,
2348                        PNV_XIVE2_CAPABILITIES),
2349     DEFINE_PROP_UINT64("config", PnvXive2, config,
2350                        PNV_XIVE2_CONFIGURATION),
2351     DEFINE_PROP_LINK("chip", PnvXive2, chip, TYPE_PNV_CHIP, PnvChip *),
2352     DEFINE_PROP_END_OF_LIST(),
2353 };
2354 
2355 static void pnv_xive2_instance_init(Object *obj)
2356 {
2357     PnvXive2 *xive = PNV_XIVE2(obj);
2358 
2359     object_initialize_child(obj, "ipi_source", &xive->ipi_source,
2360                             TYPE_XIVE_SOURCE);
2361     object_initialize_child(obj, "end_source", &xive->end_source,
2362                             TYPE_XIVE2_END_SOURCE);
2363 }
2364 
2365 static int pnv_xive2_dt_xscom(PnvXScomInterface *dev, void *fdt,
2366                               int xscom_offset)
2367 {
2368     const char compat_p10[] = "ibm,power10-xive-x";
2369     char *name;
2370     int offset;
2371     uint32_t reg[] = {
2372         cpu_to_be32(PNV10_XSCOM_XIVE2_BASE),
2373         cpu_to_be32(PNV10_XSCOM_XIVE2_SIZE)
2374     };
2375 
2376     name = g_strdup_printf("xive@%x", PNV10_XSCOM_XIVE2_BASE);
2377     offset = fdt_add_subnode(fdt, xscom_offset, name);
2378     _FDT(offset);
2379     g_free(name);
2380 
2381     _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg))));
2382     _FDT(fdt_setprop(fdt, offset, "compatible", compat_p10,
2383                      sizeof(compat_p10)));
2384     return 0;
2385 }
2386 
2387 static void pnv_xive2_class_init(ObjectClass *klass, void *data)
2388 {
2389     DeviceClass *dc = DEVICE_CLASS(klass);
2390     PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass);
2391     Xive2RouterClass *xrc = XIVE2_ROUTER_CLASS(klass);
2392     XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);
2393     XivePresenterClass *xpc = XIVE_PRESENTER_CLASS(klass);
2394     PnvXive2Class *pxc = PNV_XIVE2_CLASS(klass);
2395 
2396     xdc->dt_xscom  = pnv_xive2_dt_xscom;
2397 
2398     dc->desc       = "PowerNV XIVE2 Interrupt Controller (POWER10)";
2399     device_class_set_parent_realize(dc, pnv_xive2_realize,
2400                                     &pxc->parent_realize);
2401     device_class_set_props(dc, pnv_xive2_properties);
2402 
2403     xrc->get_eas   = pnv_xive2_get_eas;
2404     xrc->get_pq    = pnv_xive2_get_pq;
2405     xrc->set_pq    = pnv_xive2_set_pq;
2406     xrc->get_end   = pnv_xive2_get_end;
2407     xrc->write_end = pnv_xive2_write_end;
2408     xrc->get_nvp   = pnv_xive2_get_nvp;
2409     xrc->write_nvp = pnv_xive2_write_nvp;
2410     xrc->get_config  = pnv_xive2_get_config;
2411     xrc->get_block_id = pnv_xive2_get_block_id;
2412 
2413     xnc->notify    = pnv_xive2_notify;
2414 
2415     xpc->match_nvt  = pnv_xive2_match_nvt;
2416     xpc->get_config = pnv_xive2_presenter_get_config;
2417 }
2418 
2419 static const TypeInfo pnv_xive2_info = {
2420     .name          = TYPE_PNV_XIVE2,
2421     .parent        = TYPE_XIVE2_ROUTER,
2422     .instance_init = pnv_xive2_instance_init,
2423     .instance_size = sizeof(PnvXive2),
2424     .class_init    = pnv_xive2_class_init,
2425     .class_size    = sizeof(PnvXive2Class),
2426     .interfaces    = (InterfaceInfo[]) {
2427         { TYPE_PNV_XSCOM_INTERFACE },
2428         { }
2429     }
2430 };
2431 
2432 static void pnv_xive2_register_types(void)
2433 {
2434     type_register_static(&pnv_xive2_info);
2435 }
2436 
2437 type_init(pnv_xive2_register_types)
2438 
2439 /*
2440  * If the table is direct, we can compute the number of PQ entries
2441  * provisioned by FW.
2442  */
2443 static uint32_t pnv_xive2_nr_esbs(PnvXive2 *xive)
2444 {
2445     uint8_t blk = pnv_xive2_block_id(xive);
2446     uint64_t vsd = xive->vsds[VST_ESB][blk];
2447     uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);
2448 
2449     return VSD_INDIRECT & vsd ? 0 : vst_tsize * SBE_PER_BYTE;
2450 }
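
/*
 * Worked example for the computation above, with illustrative values:
 * VSD_TSIZE = 4 gives a 1 << (4 + 12) = 64 KiB direct table, and at
 * SBE_PER_BYTE = 4 2-bit PQ entries per byte that provisions
 * 65536 * 4 = 262144 ESBs. FW chooses the actual table size.
 */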
2451 
2452 /*
2453  * Compute the number of entries per indirect subpage.
2454  */
2455 static uint64_t pnv_xive2_vst_per_subpage(PnvXive2 *xive, uint32_t type)
2456 {
2457     uint8_t blk = pnv_xive2_block_id(xive);
2458     uint64_t vsd = xive->vsds[type][blk];
2459     const XiveVstInfo *info = &vst_infos[type];
2460     uint64_t vsd_addr;
2461     uint32_t page_shift;
2462 
2463     /* For direct tables, fake a valid value */
2464     if (!(VSD_INDIRECT & vsd)) {
2465         return 1;
2466     }
2467 
2468     /* Get the page size of the indirect table. */
2469     vsd_addr = vsd & VSD_ADDRESS_MASK;
2470     ldq_be_dma(&address_space_memory, vsd_addr, &vsd, MEMTXATTRS_UNSPECIFIED);
2471 
2472     if (!(vsd & VSD_ADDRESS_MASK)) {
2473 #ifdef XIVE2_DEBUG
2474         xive2_error(xive, "VST: invalid %s entry!?", info->name);
2475 #endif
2476         return 0;
2477     }
2478 
2479     page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
2480 
2481     if (!pnv_xive2_vst_page_size_allowed(page_shift)) {
2482         xive2_error(xive, "VST: invalid %s page shift %d", info->name,
2483                    page_shift);
2484         return 0;
2485     }
2486 
2487     return (1ull << page_shift) / info->size;
2488 }
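
/*
 * Worked example, with illustrative values: an indirect NVP table
 * using 64 KiB subpages (page_shift = 16) holds (1 << 16) /
 * sizeof(Xive2Nvp) entries per subpage, i.e. 2048 entries for a
 * 32-byte NVP. This is the stride pnv_xive2_pic_print_info() below
 * uses when scanning the NVPT.
 */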
2489 
2490 void pnv_xive2_pic_print_info(PnvXive2 *xive, GString *buf)
2491 {
2492     Xive2Router *xrtr = XIVE2_ROUTER(xive);
2493     uint8_t blk = pnv_xive2_block_id(xive);
2494     uint8_t chip_id = xive->chip->chip_id;
2495     uint32_t srcno0 = XIVE_EAS(blk, 0);
2496     uint32_t nr_esbs = pnv_xive2_nr_esbs(xive);
2497     Xive2Eas eas;
2498     Xive2End end;
2499     Xive2Nvp nvp;
2500     int i;
2501     uint64_t xive_nvp_per_subpage;
2502 
2503     g_string_append_printf(buf, "XIVE[%x] Source %08x .. %08x\n",
2504                            blk, srcno0, srcno0 + nr_esbs - 1);
2505     xive_source_pic_print_info(&xive->ipi_source, srcno0, buf);
2506 
2507     g_string_append_printf(buf, "XIVE[%x] EAT %08x .. %08x\n",
2508                            blk, srcno0, srcno0 + nr_esbs - 1);
2509     for (i = 0; i < nr_esbs; i++) {
2510         if (xive2_router_get_eas(xrtr, blk, i, &eas)) {
2511             break;
2512         }
2513         if (!xive2_eas_is_masked(&eas)) {
2514             xive2_eas_pic_print_info(&eas, i, buf);
2515         }
2516     }
2517 
2518     g_string_append_printf(buf, "XIVE[%x] #%d END Escalation EAT\n",
2519                            chip_id, blk);
2520     i = 0;
2521     while (!xive2_router_get_end(xrtr, blk, i, &end)) {
2522         xive2_end_eas_pic_print_info(&end, i++, buf);
2523     }
2524 
2525     g_string_append_printf(buf, "XIVE[%x] #%d ENDT\n", chip_id, blk);
2526     i = 0;
2527     while (!xive2_router_get_end(xrtr, blk, i, &end)) {
2528         xive2_end_pic_print_info(&end, i++, buf);
2529     }
2530 
2531     g_string_append_printf(buf, "XIVE[%x] #%d NVPT %08x .. %08x\n",
2532                            chip_id, blk, 0, XIVE2_NVP_COUNT - 1);
2533     xive_nvp_per_subpage = pnv_xive2_vst_per_subpage(xive, VST_NVP);
2534     for (i = 0; i < XIVE2_NVP_COUNT; i += xive_nvp_per_subpage) {
2535         while (!xive2_router_get_nvp(xrtr, blk, i, &nvp)) {
2536             xive2_nvp_pic_print_info(&nvp, i++, buf);
2537         }
2538     }
2539 }
2540