1 /*
2 * QEMU PowerPC XIVE2 interrupt controller model (POWER10)
3 *
4 * Copyright (c) 2019-2024, IBM Corporation.
5 *
6 * SPDX-License-Identifier: GPL-2.0-or-later
7 */
8
9 #include "qemu/osdep.h"
10 #include "qemu/log.h"
11 #include "qapi/error.h"
12 #include "target/ppc/cpu.h"
13 #include "system/cpus.h"
14 #include "system/dma.h"
15 #include "hw/ppc/fdt.h"
16 #include "hw/ppc/pnv.h"
17 #include "hw/ppc/pnv_chip.h"
18 #include "hw/ppc/pnv_core.h"
19 #include "hw/ppc/pnv_xscom.h"
20 #include "hw/ppc/xive2.h"
21 #include "hw/ppc/pnv_xive.h"
22 #include "hw/ppc/xive_regs.h"
23 #include "hw/ppc/xive2_regs.h"
24 #include "hw/ppc/ppc.h"
25 #include "hw/qdev-properties.h"
26 #include "system/reset.h"
27 #include "system/qtest.h"
28
29 #include <libfdt.h>
30
31 #include "pnv_xive2_regs.h"
32
33 #undef XIVE2_DEBUG
34
35 /* XIVE Sync or Flush Notification Block */
36 typedef struct XiveSfnBlock {
37 uint8_t bytes[32];
38 } XiveSfnBlock;
39
40 /* XIVE Thread Sync or Flush Notification Area */
41 typedef struct XiveThreadNA {
42 XiveSfnBlock topo[16];
43 } XiveThreadNA;
44
45 /*
46 * Virtual structures table (VST)
47 */
48 #define SBE_PER_BYTE 4
49
50 typedef struct XiveVstInfo {
51 const char *name;
52 uint32_t size;
53 uint32_t max_blocks;
54 } XiveVstInfo;
55
56 static const XiveVstInfo vst_infos[] = {
57
58 [VST_EAS] = { "EAT", sizeof(Xive2Eas), 16 },
59 [VST_ESB] = { "ESB", 1, 16 },
60 [VST_END] = { "ENDT", sizeof(Xive2End), 16 },
61
62 [VST_NVP] = { "NVPT", sizeof(Xive2Nvp), 16 },
63 [VST_NVG] = { "NVGT", sizeof(Xive2Nvgc), 16 },
64 [VST_NVC] = { "NVCT", sizeof(Xive2Nvgc), 16 },
65
66 [VST_IC] = { "IC", 1, /* ? */ 16 }, /* Topology # */
67 [VST_SYNC] = { "SYNC", sizeof(XiveThreadNA), 16 }, /* Topology # */
68
69 /*
70 * This table contains the backing store pages for the interrupt
71 * fifos of the VC sub-engine in case of overflow.
72 *
73 * 0 - IPI,
74 * 1 - HWD,
75 * 2 - NxC,
76 * 3 - INT,
77 * 4 - OS-Queue,
78 * 5 - Pool-Queue,
79 * 6 - Hard-Queue
80 */
81 [VST_ERQ] = { "ERQ", 1, VC_QUEUE_COUNT },
82 };
83
84 #define xive2_error(xive, fmt, ...) \
85 qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n", \
86 (xive)->chip->chip_id, ## __VA_ARGS__);
87
88 /*
89 * TODO: Document block id override
90 */
91 static uint32_t pnv_xive2_block_id(PnvXive2 *xive)
92 {
93 uint8_t blk = xive->chip->chip_id;
94 uint64_t cfg_val = xive->cq_regs[CQ_XIVE_CFG >> 3];
95
96 if (cfg_val & CQ_XIVE_CFG_HYP_HARD_BLKID_OVERRIDE) {
97 blk = GETFIELD(CQ_XIVE_CFG_HYP_HARD_BLOCK_ID, cfg_val);
98 }
99
100 return blk;
101 }
102
103 /*
104 * Remote access to INT controllers. HW uses MMIOs(?). For now, a simple
105 * scan of all the chips' INT controllers is good enough.
106 */
107 static PnvXive2 *pnv_xive2_get_remote(uint32_t vsd_type, hwaddr fwd_addr)
108 {
109 PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine());
110 int i;
111
112 for (i = 0; i < pnv->num_chips; i++) {
113 Pnv10Chip *chip10 = PNV10_CHIP(pnv->chips[i]);
114 PnvXive2 *xive = &chip10->xive;
115
116 /*
117 * Is this the XIVE controller matching the forwarded VSD address
118 * for this VSD type?
119 */
120 if ((vsd_type == VST_ESB && fwd_addr == xive->esb_base) ||
121 (vsd_type == VST_END && fwd_addr == xive->end_base) ||
122 ((vsd_type == VST_NVP ||
123 vsd_type == VST_NVG) && fwd_addr == xive->nvpg_base) ||
124 (vsd_type == VST_NVC && fwd_addr == xive->nvc_base)) {
125 return xive;
126 }
127 }
128
129 qemu_log_mask(LOG_GUEST_ERROR,
130 "XIVE: >>>>> %s vsd_type %u fwd_addr 0x%"HWADDR_PRIx
131 " NOT FOUND\n",
132 __func__, vsd_type, fwd_addr);
133 return NULL;
134 }
135
136 /*
137 * VST accessors for ESB, EAT, ENDT, NVP
138 *
139 * Indirect VST tables are arrays of VSDs pointing to a page (of same
140 * size). Each page is a direct VST table.
141 */
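/*
 * Worked example (illustrative only): with an indirect ENDT using 64K
 * pages and 32-byte END entries, each page holds 64K / 32 = 2048 ENDs.
 * Looking up END index 5000 therefore reads entry 5000 / 2048 = 2 of
 * the indirect table, then entry 5000 % 2048 of the direct page it
 * points to, which is what pnv_xive2_vst_addr_indirect() computes.
 */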
142
143 #define XIVE_VSD_SIZE 8
144
145 /* Indirect page size can be 4K, 64K, 2M, 16M. */
146 static uint64_t pnv_xive2_vst_page_size_allowed(uint32_t page_shift)
147 {
148 return page_shift == 12 || page_shift == 16 ||
149 page_shift == 21 || page_shift == 24;
150 }
151
152 static uint64_t pnv_xive2_vst_addr_direct(PnvXive2 *xive, uint32_t type,
153 uint64_t vsd, uint32_t idx)
154 {
155 const XiveVstInfo *info = &vst_infos[type];
156 uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
157 uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);
158 uint32_t idx_max;
159
160 idx_max = vst_tsize / info->size - 1;
161 if (idx > idx_max) {
162 #ifdef XIVE2_DEBUG
163 xive2_error(xive, "VST: %s entry %x out of range [ 0 .. %x ] !?",
164 info->name, idx, idx_max);
165 #endif
166 return 0;
167 }
168
169 return vst_addr + idx * info->size;
170 }
171
172 static uint64_t pnv_xive2_vst_addr_indirect(PnvXive2 *xive, uint32_t type,
173 uint64_t vsd, uint32_t idx)
174 {
175 const XiveVstInfo *info = &vst_infos[type];
176 uint64_t vsd_addr;
177 uint32_t vsd_idx;
178 uint32_t page_shift;
179 uint32_t vst_per_page;
180
181 /* Get the page size of the indirect table. */
182 vsd_addr = vsd & VSD_ADDRESS_MASK;
183 ldq_be_dma(&address_space_memory, vsd_addr, &vsd, MEMTXATTRS_UNSPECIFIED);
184
185 if (!(vsd & VSD_ADDRESS_MASK)) {
186 #ifdef XIVE2_DEBUG
187 xive2_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
188 #endif
189 return 0;
190 }
191
192 page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
193
194 if (!pnv_xive2_vst_page_size_allowed(page_shift)) {
195 xive2_error(xive, "VST: invalid %s page shift %d", info->name,
196 page_shift);
197 return 0;
198 }
199
200 vst_per_page = (1ull << page_shift) / info->size;
201 vsd_idx = idx / vst_per_page;
202
203 /* Load the VSD we are looking for, if not already done */
204 if (vsd_idx) {
205 vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE;
206 ldq_be_dma(&address_space_memory, vsd_addr, &vsd,
207 MEMTXATTRS_UNSPECIFIED);
208
209 if (!(vsd & VSD_ADDRESS_MASK)) {
210 #ifdef XIVE2_DEBUG
211 xive2_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
212 #endif
213 return 0;
214 }
215
216 /*
217 * Check that the pages have a consistent size across the
218 * indirect table
219 */
220 if (page_shift != GETFIELD(VSD_TSIZE, vsd) + 12) {
221 xive2_error(xive, "VST: %s entry %x indirect page size differs !?",
222 info->name, idx);
223 return 0;
224 }
225 }
226
227 return pnv_xive2_vst_addr_direct(xive, type, vsd, (idx % vst_per_page));
228 }
229
230 static uint8_t pnv_xive2_nvc_table_compress_shift(PnvXive2 *xive)
231 {
232 uint8_t shift = GETFIELD(PC_NXC_PROC_CONFIG_NVC_TABLE_COMPRESS,
233 xive->pc_regs[PC_NXC_PROC_CONFIG >> 3]);
234 return shift > 8 ? 0 : shift;
235 }
236
237 static uint8_t pnv_xive2_nvg_table_compress_shift(PnvXive2 *xive)
238 {
239 uint8_t shift = GETFIELD(PC_NXC_PROC_CONFIG_NVG_TABLE_COMPRESS,
240 xive->pc_regs[PC_NXC_PROC_CONFIG >> 3]);
241 return shift > 8 ? 0 : shift;
242 }
243
244 static uint64_t pnv_xive2_vst_addr(PnvXive2 *xive, uint32_t type, uint8_t blk,
245 uint32_t idx)
246 {
247 const XiveVstInfo *info = &vst_infos[type];
248 uint64_t vsd;
249
250 if (blk >= info->max_blocks) {
251 xive2_error(xive, "VST: invalid block id %d for VST %s %d !?",
252 blk, info->name, idx);
253 return 0;
254 }
255
256 vsd = xive->vsds[type][blk];
257 if (vsd == 0) {
258 xive2_error(xive, "VST: vsd == 0 block id %d for VST %s %d !?",
259 blk, info->name, idx);
260 return 0;
261 }
262
263 /* Remote VST access */
264 if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) {
265 xive = pnv_xive2_get_remote(type, (vsd & VSD_ADDRESS_MASK));
266 return xive ? pnv_xive2_vst_addr(xive, type, blk, idx) : 0;
267 }
268
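    /*
     * NVG/NVC table compression (descriptive note): the index is scaled
     * down by the configured shift before the lookup, which lets
     * firmware back a sparse ID space with a proportionally smaller
     * table.
     */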
269 if (type == VST_NVG) {
270 idx >>= pnv_xive2_nvg_table_compress_shift(xive);
271 } else if (type == VST_NVC) {
272 idx >>= pnv_xive2_nvc_table_compress_shift(xive);
273 }
274
275 if (VSD_INDIRECT & vsd) {
276 return pnv_xive2_vst_addr_indirect(xive, type, vsd, idx);
277 }
278
279 return pnv_xive2_vst_addr_direct(xive, type, vsd, idx);
280 }
281
282 static int pnv_xive2_vst_read(PnvXive2 *xive, uint32_t type, uint8_t blk,
283 uint32_t idx, void *data)
284 {
285 const XiveVstInfo *info = &vst_infos[type];
286 uint64_t addr = pnv_xive2_vst_addr(xive, type, blk, idx);
287 MemTxResult result;
288
289 if (!addr) {
290 return -1;
291 }
292
293 result = address_space_read(&address_space_memory, addr,
294 MEMTXATTRS_UNSPECIFIED, data,
295 info->size);
296 if (result != MEMTX_OK) {
297 xive2_error(xive, "VST: read failed at @0x%" HWADDR_PRIx
298 " for VST %s %x/%x\n", addr, info->name, blk, idx);
299 return -1;
300 }
301 return 0;
302 }
303
304 #define XIVE_VST_WORD_ALL -1
305
306 static int pnv_xive2_vst_write(PnvXive2 *xive, uint32_t type, uint8_t blk,
307 uint32_t idx, void *data, uint32_t word_number)
308 {
309 const XiveVstInfo *info = &vst_infos[type];
310 uint64_t addr = pnv_xive2_vst_addr(xive, type, blk, idx);
311 MemTxResult result;
312
313 if (!addr) {
314 return -1;
315 }
316
317 if (word_number == XIVE_VST_WORD_ALL) {
318 result = address_space_write(&address_space_memory, addr,
319 MEMTXATTRS_UNSPECIFIED, data,
320 info->size);
321 } else {
322 result = address_space_write(&address_space_memory,
323 addr + word_number * 4,
324 MEMTXATTRS_UNSPECIFIED,
325 data + word_number * 4, 4);
326 }
327
328 if (result != MEMTX_OK) {
329 xive2_error(xive, "VST: write failed at @0x%" HWADDR_PRIx
330 "for VST %s %x/%x\n", addr, info->name, blk, idx);
331 return -1;
332 }
333 return 0;
334 }
335
336 static int pnv_xive2_get_pq(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
337 uint8_t *pq)
338 {
339 PnvXive2 *xive = PNV_XIVE2(xrtr);
340
341 if (pnv_xive2_block_id(xive) != blk) {
342 xive2_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
343 return -1;
344 }
345
346 *pq = xive_source_esb_get(&xive->ipi_source, idx);
347 return 0;
348 }
349
350 static int pnv_xive2_set_pq(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
351 uint8_t *pq)
352 {
353 PnvXive2 *xive = PNV_XIVE2(xrtr);
354
355 if (pnv_xive2_block_id(xive) != blk) {
356 xive2_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
357 return -1;
358 }
359
360 *pq = xive_source_esb_set(&xive->ipi_source, idx, *pq);
361 return 0;
362 }
363
364 static int pnv_xive2_get_end(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
365 Xive2End *end)
366 {
367 return pnv_xive2_vst_read(PNV_XIVE2(xrtr), VST_END, blk, idx, end);
368 }
369
370 static int pnv_xive2_write_end(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
371 Xive2End *end, uint8_t word_number)
372 {
373 return pnv_xive2_vst_write(PNV_XIVE2(xrtr), VST_END, blk, idx, end,
374 word_number);
375 }
376
377 static inline int pnv_xive2_get_current_pir(PnvXive2 *xive)
378 {
379 if (!qtest_enabled()) {
380 PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
381 return ppc_cpu_pir(cpu);
382 }
383 return 0;
384 }
385
386 /*
387 * After SW injects a Queue Sync or Cache Flush operation, HW will notify
388 * SW of the completion of the operation by writing a byte of all 1's (0xff)
389 * to a specific memory location. The memory location is calculated by first
390 * looking up a base address in the SYNC VSD using the Topology ID of the
391 * originating thread as the "block" number. This points to a
392 * 64k block of memory that is further divided into 128 chunks of 512
393 * bytes, indexed by the thread id of the requesting thread.
394 * Finally, this 512 byte chunk of memory is divided into 16 chunks of
395 * 32 bytes, indexed by the topology id of the targeted IC's chip.
396 * The values below are the offsets into that 32 byte chunk of memory for
397 * each type of cache flush or queue sync operation.
398 */
399 #define PNV_XIVE2_QUEUE_IPI 0x00
400 #define PNV_XIVE2_QUEUE_HW 0x01
401 #define PNV_XIVE2_QUEUE_NXC 0x02
402 #define PNV_XIVE2_QUEUE_INT 0x03
403 #define PNV_XIVE2_QUEUE_OS 0x04
404 #define PNV_XIVE2_QUEUE_POOL 0x05
405 #define PNV_XIVE2_QUEUE_HARD 0x06
406 #define PNV_XIVE2_CACHE_ENDC 0x08
407 #define PNV_XIVE2_CACHE_ESBC 0x09
408 #define PNV_XIVE2_CACHE_EASC 0x0a
409 #define PNV_XIVE2_QUEUE_NXC_LD_LCL_NCO 0x10
410 #define PNV_XIVE2_QUEUE_NXC_LD_LCL_CO 0x11
411 #define PNV_XIVE2_QUEUE_NXC_ST_LCL_NCI 0x12
412 #define PNV_XIVE2_QUEUE_NXC_ST_LCL_CI 0x13
413 #define PNV_XIVE2_QUEUE_NXC_ST_RMT_NCI 0x14
414 #define PNV_XIVE2_QUEUE_NXC_ST_RMT_CI 0x15
415 #define PNV_XIVE2_CACHE_NXC 0x18
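/*
 * Illustrative example of the computation done in
 * pnv_xive2_inject_notify() below: a thread with topology id 1 and
 * thread number 10 waiting for an ENDC cache flush on the IC of chip 0
 * is notified by a 0xff store at:
 *
 *   (SYNC VSD of block 1) + 10 * sizeof(XiveThreadNA)    512-byte chunk
 *                         + 0 * sizeof(XiveSfnBlock)      32-byte chunk
 *                         + PNV_XIVE2_CACHE_ENDC          offset 0x08
 */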
416
417 static int pnv_xive2_inject_notify(PnvXive2 *xive, int type)
418 {
419 uint64_t addr;
420 int pir = pnv_xive2_get_current_pir(xive);
421 int thread_nr = PNV10_PIR2THREAD(pir);
422 int thread_topo_id = PNV10_PIR2CHIP(pir);
423 int ic_topo_id = xive->chip->chip_id;
424 uint64_t offset = ic_topo_id * sizeof(XiveSfnBlock);
425 uint8_t byte = 0xff;
426 MemTxResult result;
427
428 /* Retrieve the address of requesting thread's notification area */
429 addr = pnv_xive2_vst_addr(xive, VST_SYNC, thread_topo_id, thread_nr);
430
431 if (!addr) {
432 xive2_error(xive, "VST: no SYNC entry %x/%x !?",
433 thread_topo_id, thread_nr);
434 return -1;
435 }
436
437 address_space_stb(&address_space_memory, addr + offset + type, byte,
438 MEMTXATTRS_UNSPECIFIED, &result);
439 assert(result == MEMTX_OK);
440
441 return 0;
442 }
443
444 static int pnv_xive2_end_update(PnvXive2 *xive, uint8_t watch_engine)
445 {
446 uint8_t blk;
447 uint32_t idx;
448 int i, spec_reg, data_reg;
449 uint64_t endc_watch[4];
450
451 assert(watch_engine < ARRAY_SIZE(endc_watch));
452
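    /*
     * Each cache watch engine has its own SPEC/DATA register set,
     * spaced 0x40 bytes apart; ">> 3" converts the byte offset into an
     * index in the 64-bit register array.
     */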
453 spec_reg = (VC_ENDC_WATCH0_SPEC + watch_engine * 0x40) >> 3;
454 data_reg = (VC_ENDC_WATCH0_DATA0 + watch_engine * 0x40) >> 3;
455 blk = GETFIELD(VC_ENDC_WATCH_BLOCK_ID, xive->vc_regs[spec_reg]);
456 idx = GETFIELD(VC_ENDC_WATCH_INDEX, xive->vc_regs[spec_reg]);
457
458 for (i = 0; i < ARRAY_SIZE(endc_watch); i++) {
459 endc_watch[i] = cpu_to_be64(xive->vc_regs[data_reg + i]);
460 }
461
462 return pnv_xive2_vst_write(xive, VST_END, blk, idx, endc_watch,
463 XIVE_VST_WORD_ALL);
464 }
465
466 static void pnv_xive2_end_cache_load(PnvXive2 *xive, uint8_t watch_engine)
467 {
468 uint8_t blk;
469 uint32_t idx;
470 uint64_t endc_watch[4] = { 0 };
471 int i, spec_reg, data_reg;
472
473 assert(watch_engine < ARRAY_SIZE(endc_watch));
474
475 spec_reg = (VC_ENDC_WATCH0_SPEC + watch_engine * 0x40) >> 3;
476 data_reg = (VC_ENDC_WATCH0_DATA0 + watch_engine * 0x40) >> 3;
477 blk = GETFIELD(VC_ENDC_WATCH_BLOCK_ID, xive->vc_regs[spec_reg]);
478 idx = GETFIELD(VC_ENDC_WATCH_INDEX, xive->vc_regs[spec_reg]);
479
480 if (pnv_xive2_vst_read(xive, VST_END, blk, idx, endc_watch)) {
481 xive2_error(xive, "VST: no END entry %x/%x !?", blk, idx);
482 }
483
484 for (i = 0; i < ARRAY_SIZE(endc_watch); i++) {
485 xive->vc_regs[data_reg + i] = be64_to_cpu(endc_watch[i]);
486 }
487 }
488
489 static int pnv_xive2_get_nvp(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
490 Xive2Nvp *nvp)
491 {
492 return pnv_xive2_vst_read(PNV_XIVE2(xrtr), VST_NVP, blk, idx, nvp);
493 }
494
495 static int pnv_xive2_write_nvp(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
496 Xive2Nvp *nvp, uint8_t word_number)
497 {
498 return pnv_xive2_vst_write(PNV_XIVE2(xrtr), VST_NVP, blk, idx, nvp,
499 word_number);
500 }
501
502 static int pnv_xive2_get_nvgc(Xive2Router *xrtr, bool crowd,
503 uint8_t blk, uint32_t idx,
504 Xive2Nvgc *nvgc)
505 {
506 return pnv_xive2_vst_read(PNV_XIVE2(xrtr), crowd ? VST_NVC : VST_NVG,
507 blk, idx, nvgc);
508 }
509
510 static int pnv_xive2_write_nvgc(Xive2Router *xrtr, bool crowd,
511 uint8_t blk, uint32_t idx,
512 Xive2Nvgc *nvgc)
513 {
514 return pnv_xive2_vst_write(PNV_XIVE2(xrtr), crowd ? VST_NVC : VST_NVG,
515 blk, idx, nvgc,
516 XIVE_VST_WORD_ALL);
517 }
518
519 static int pnv_xive2_nxc_to_table_type(uint8_t nxc_type, uint32_t *table_type)
520 {
521 switch (nxc_type) {
522 case PC_NXC_WATCH_NXC_NVP:
523 *table_type = VST_NVP;
524 break;
525 case PC_NXC_WATCH_NXC_NVG:
526 *table_type = VST_NVG;
527 break;
528 case PC_NXC_WATCH_NXC_NVC:
529 *table_type = VST_NVC;
530 break;
531 default:
532 qemu_log_mask(LOG_GUEST_ERROR,
533 "XIVE: invalid table type for nxc operation\n");
534 return -1;
535 }
536 return 0;
537 }
538
539 static int pnv_xive2_nxc_update(PnvXive2 *xive, uint8_t watch_engine)
540 {
541 uint8_t blk, nxc_type;
542 uint32_t idx, table_type = -1;
543 int i, spec_reg, data_reg;
544 uint64_t nxc_watch[4];
545
546 assert(watch_engine < ARRAY_SIZE(nxc_watch));
547
548 spec_reg = (PC_NXC_WATCH0_SPEC + watch_engine * 0x40) >> 3;
549 data_reg = (PC_NXC_WATCH0_DATA0 + watch_engine * 0x40) >> 3;
550 nxc_type = GETFIELD(PC_NXC_WATCH_NXC_TYPE, xive->pc_regs[spec_reg]);
551 blk = GETFIELD(PC_NXC_WATCH_BLOCK_ID, xive->pc_regs[spec_reg]);
552 idx = GETFIELD(PC_NXC_WATCH_INDEX, xive->pc_regs[spec_reg]);
553
554 assert(!pnv_xive2_nxc_to_table_type(nxc_type, &table_type));
555
556 for (i = 0; i < ARRAY_SIZE(nxc_watch); i++) {
557 nxc_watch[i] = cpu_to_be64(xive->pc_regs[data_reg + i]);
558 }
559
560 return pnv_xive2_vst_write(xive, table_type, blk, idx, nxc_watch,
561 XIVE_VST_WORD_ALL);
562 }
563
564 static void pnv_xive2_nxc_cache_load(PnvXive2 *xive, uint8_t watch_engine)
565 {
566 uint8_t blk, nxc_type;
567 uint32_t idx, table_type = -1;
568 uint64_t nxc_watch[4] = { 0 };
569 int i, spec_reg, data_reg;
570
571 assert(watch_engine < ARRAY_SIZE(nxc_watch));
572
573 spec_reg = (PC_NXC_WATCH0_SPEC + watch_engine * 0x40) >> 3;
574 data_reg = (PC_NXC_WATCH0_DATA0 + watch_engine * 0x40) >> 3;
575 nxc_type = GETFIELD(PC_NXC_WATCH_NXC_TYPE, xive->pc_regs[spec_reg]);
576 blk = GETFIELD(PC_NXC_WATCH_BLOCK_ID, xive->pc_regs[spec_reg]);
577 idx = GETFIELD(PC_NXC_WATCH_INDEX, xive->pc_regs[spec_reg]);
578
579 assert(!pnv_xive2_nxc_to_table_type(nxc_type, &table_type));
580
581 if (pnv_xive2_vst_read(xive, table_type, blk, idx, nxc_watch)) {
582 xive2_error(xive, "VST: no NXC entry %x/%x in %s table!?",
583 blk, idx, vst_infos[table_type].name);
584 }
585
586 for (i = 0; i < ARRAY_SIZE(nxc_watch); i++) {
587 xive->pc_regs[data_reg + i] = be64_to_cpu(nxc_watch[i]);
588 }
589 }
590
591 static int pnv_xive2_get_eas(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
592 Xive2Eas *eas)
593 {
594 PnvXive2 *xive = PNV_XIVE2(xrtr);
595
596 if (pnv_xive2_block_id(xive) != blk) {
597 xive2_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
598 return -1;
599 }
600
601 return pnv_xive2_vst_read(xive, VST_EAS, blk, idx, eas);
602 }
603
604 static uint32_t pnv_xive2_get_config(Xive2Router *xrtr)
605 {
606 PnvXive2 *xive = PNV_XIVE2(xrtr);
607 uint32_t cfg = 0;
608 uint64_t reg = xive->cq_regs[CQ_XIVE_CFG >> 3];
609
610 if (reg & CQ_XIVE_CFG_GEN1_TIMA_OS) {
611 cfg |= XIVE2_GEN1_TIMA_OS;
612 }
613
614 if (reg & CQ_XIVE_CFG_EN_VP_SAVE_RESTORE) {
615 cfg |= XIVE2_VP_SAVE_RESTORE;
616 }
617
618 if (GETFIELD(CQ_XIVE_CFG_HYP_HARD_RANGE, reg) ==
619 CQ_XIVE_CFG_THREADID_8BITS) {
620 cfg |= XIVE2_THREADID_8BITS;
621 }
622
623 if (reg & CQ_XIVE_CFG_EN_VP_GRP_PRIORITY) {
624 cfg |= XIVE2_EN_VP_GRP_PRIORITY;
625 }
626
627 cfg = SETFIELD(XIVE2_VP_INT_PRIO, cfg,
628 GETFIELD(CQ_XIVE_CFG_VP_INT_PRIO, reg));
629
630 return cfg;
631 }
632
633 static bool pnv_xive2_is_cpu_enabled(PnvXive2 *xive, PowerPCCPU *cpu)
634 {
635 int pir = ppc_cpu_pir(cpu);
636 uint32_t fc = PNV10_PIR2FUSEDCORE(pir);
637 uint64_t reg = fc < 8 ? TCTXT_EN0 : TCTXT_EN1;
638 uint32_t bit = pir & 0x3f;
639
640 return xive->tctxt_regs[reg >> 3] & PPC_BIT(bit);
641 }
642
643 static bool pnv_xive2_match_nvt(XivePresenter *xptr, uint8_t format,
644 uint8_t nvt_blk, uint32_t nvt_idx,
645 bool crowd, bool cam_ignore, uint8_t priority,
646 uint32_t logic_serv, XiveTCTXMatch *match)
647 {
648 PnvXive2 *xive = PNV_XIVE2(xptr);
649 PnvChip *chip = xive->chip;
650 int i, j;
651 bool gen1_tima_os =
652 xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS;
653 static int next_start_core;
654 static int next_start_thread;
655 int start_core = next_start_core;
656 int start_thread = next_start_thread;
657
658 for (i = 0; i < chip->nr_cores; i++) {
659 PnvCore *pc = chip->cores[(i + start_core) % chip->nr_cores];
660 CPUCore *cc = CPU_CORE(pc);
661
662 for (j = 0; j < cc->nr_threads; j++) {
663 /* Start search for match with different thread each call */
664 PowerPCCPU *cpu = pc->threads[(j + start_thread) % cc->nr_threads];
665 XiveTCTX *tctx;
666 int ring;
667
668 if (!pnv_xive2_is_cpu_enabled(xive, cpu)) {
669 continue;
670 }
671
672 tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
673
674 if (gen1_tima_os) {
675 ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk,
676 nvt_idx, cam_ignore,
677 logic_serv);
678 } else {
679 ring = xive2_presenter_tctx_match(xptr, tctx, format, nvt_blk,
680 nvt_idx, crowd, cam_ignore,
681 logic_serv);
682 }
683
684 if (ring != -1) {
685 /*
686 * For VP-specific match, finding more than one is a
687 * problem. For group notification, it's possible.
688 */
689 if (!cam_ignore && match->tctx) {
690 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a "
691 "thread context NVT %x/%x\n",
692 nvt_blk, nvt_idx);
693 /* Should set a FIR if we ever model it */
694 match->count++;
695 continue;
696 }
697 /*
698 * For a group notification, we need to know if the
699 * match is precluded first by checking the current
700 * thread priority. If the interrupt can be delivered,
701 * we always notify the first match (for now).
702 */
703 if (cam_ignore &&
704 xive2_tm_irq_precluded(tctx, ring, priority)) {
705 match->precluded = true;
706 } else {
707 if (!match->tctx) {
708 match->ring = ring;
709 match->tctx = tctx;
710
711 next_start_thread = j + start_thread + 1;
712 if (next_start_thread >= cc->nr_threads) {
713 next_start_thread = 0;
714 next_start_core = i + start_core + 1;
715 if (next_start_core >= chip->nr_cores) {
716 next_start_core = 0;
717 }
718 }
719 }
720 match->count++;
721 }
722 }
723 }
724 }
725
726 return !!match->count;
727 }
728
729 static uint32_t pnv_xive2_presenter_get_config(XivePresenter *xptr)
730 {
731 PnvXive2 *xive = PNV_XIVE2(xptr);
732 uint32_t cfg = 0;
733
734 if (xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS) {
735 cfg |= XIVE_PRESENTER_GEN1_TIMA_OS;
736 }
737 return cfg;
738 }
739
740 static int pnv_xive2_broadcast(XivePresenter *xptr,
741 uint8_t nvt_blk, uint32_t nvt_idx,
742 bool crowd, bool ignore, uint8_t priority)
743 {
744 PnvXive2 *xive = PNV_XIVE2(xptr);
745 PnvChip *chip = xive->chip;
746 int i, j;
747 bool gen1_tima_os =
748 xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS;
749
750 for (i = 0; i < chip->nr_cores; i++) {
751 PnvCore *pc = chip->cores[i];
752 CPUCore *cc = CPU_CORE(pc);
753
754 for (j = 0; j < cc->nr_threads; j++) {
755 PowerPCCPU *cpu = pc->threads[j];
756 XiveTCTX *tctx;
757 int ring;
758
759 if (!pnv_xive2_is_cpu_enabled(xive, cpu)) {
760 continue;
761 }
762
763 tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
764
765 if (gen1_tima_os) {
766 ring = xive_presenter_tctx_match(xptr, tctx, 0, nvt_blk,
767 nvt_idx, ignore, 0);
768 } else {
769 ring = xive2_presenter_tctx_match(xptr, tctx, 0, nvt_blk,
770 nvt_idx, crowd, ignore, 0);
771 }
772
773 if (ring != -1) {
774 xive2_tm_set_lsmfb(tctx, ring, priority);
775 }
776 }
777 }
778 return 0;
779 }
780
781 static uint8_t pnv_xive2_get_block_id(Xive2Router *xrtr)
782 {
783 return pnv_xive2_block_id(PNV_XIVE2(xrtr));
784 }
785
786 /*
787 * The TIMA MMIO space is shared among the chips and to identify the
788 * chip from which the access is being done, we extract the chip id
789 * from the PIR.
790 */
791 static PnvXive2 *pnv_xive2_tm_get_xive(PowerPCCPU *cpu)
792 {
793 int pir = ppc_cpu_pir(cpu);
794 XivePresenter *xptr = XIVE_TCTX(pnv_cpu_state(cpu)->intc)->xptr;
795 PnvXive2 *xive = PNV_XIVE2(xptr);
796
797 if (!pnv_xive2_is_cpu_enabled(xive, cpu)) {
798 xive2_error(xive, "IC: CPU %x is not enabled", pir);
799 }
800 return xive;
801 }
802
803 /*
804 * The internal sources of the interrupt controller have no knowledge
805 * of the XIVE2 chip on which they reside. Encode the block id in the
806 * source interrupt number before forwarding the source event
807 * notification to the Router. This is required on a multichip system.
808 */
809 static void pnv_xive2_notify(XiveNotifier *xn, uint32_t srcno, bool pq_checked)
810 {
811 PnvXive2 *xive = PNV_XIVE2(xn);
812 uint8_t blk = pnv_xive2_block_id(xive);
813
814 xive2_router_notify(xn, XIVE_EAS(blk, srcno), pq_checked);
815 }
816
817 /*
818 * Set Translation Tables
819 *
820 * TODO add support for multiple sets
821 */
822 static int pnv_xive2_stt_set_data(PnvXive2 *xive, uint64_t val)
823 {
824 uint8_t tsel = GETFIELD(CQ_TAR_SELECT, xive->cq_regs[CQ_TAR >> 3]);
825 uint8_t entry = GETFIELD(CQ_TAR_ENTRY_SELECT,
826 xive->cq_regs[CQ_TAR >> 3]);
827
828 switch (tsel) {
829 case CQ_TAR_NVPG:
830 case CQ_TAR_ESB:
831 case CQ_TAR_END:
832 case CQ_TAR_NVC:
833 xive->tables[tsel][entry] = val;
834 break;
835 default:
836 xive2_error(xive, "IC: unsupported table %d", tsel);
837 return -1;
838 }
839
840 if (xive->cq_regs[CQ_TAR >> 3] & CQ_TAR_AUTOINC) {
841 xive->cq_regs[CQ_TAR >> 3] = SETFIELD(CQ_TAR_ENTRY_SELECT,
842 xive->cq_regs[CQ_TAR >> 3], ++entry);
843 }
844
845 return 0;
846 }
847 /*
848 * Virtual Structure Tables (VST) configuration
849 */
850 static void pnv_xive2_vst_set_exclusive(PnvXive2 *xive, uint8_t type,
851 uint8_t blk, uint64_t vsd)
852 {
853 Xive2EndSource *end_xsrc = &xive->end_source;
854 XiveSource *xsrc = &xive->ipi_source;
855 const XiveVstInfo *info = &vst_infos[type];
856 uint32_t page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
857 uint64_t vst_tsize = 1ull << page_shift;
858 uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
859
860 /* Basic checks */
861
862 if (VSD_INDIRECT & vsd) {
863 if (!pnv_xive2_vst_page_size_allowed(page_shift)) {
864 xive2_error(xive, "VST: invalid %s page shift %d", info->name,
865 page_shift);
866 return;
867 }
868 }
869
870 if (!QEMU_IS_ALIGNED(vst_addr, 1ull << page_shift)) {
871 xive2_error(xive, "VST: %s table address 0x%"PRIx64
872 " is not aligned with page shift %d",
873 info->name, vst_addr, page_shift);
874 return;
875 }
876
877 /* Record the table configuration (in SRAM on HW) */
878 xive->vsds[type][blk] = vsd;
879
880 /* Now tune the models with the configuration provided by the FW */
881
882 switch (type) {
883 case VST_ESB:
884 /*
885 * Backing store pages for the source PQ bits. The model does
886 * not use these PQ bits backed in RAM because the XiveSource
887 * model has its own.
888 *
889 * If the table is direct, we can compute the number of PQ
890 * entries provisioned by FW (such as skiboot) and resize the
891 * ESB window accordingly.
892 */
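        /*
         * Sizing example (illustrative): a direct 64K ESB table covers
         * 64K * SBE_PER_BYTE = 256K PQ entries, so the window below is
         * resized to 256K times the per-entry ESB MMIO size.
         */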
893 if (memory_region_is_mapped(&xsrc->esb_mmio)) {
894 memory_region_del_subregion(&xive->esb_mmio, &xsrc->esb_mmio);
895 }
896 if (!(VSD_INDIRECT & vsd)) {
897 memory_region_set_size(&xsrc->esb_mmio, vst_tsize * SBE_PER_BYTE
898 * (1ull << xsrc->esb_shift));
899 }
900
901 memory_region_add_subregion(&xive->esb_mmio, 0, &xsrc->esb_mmio);
902 break;
903
904 case VST_EAS: /* Nothing to be done */
905 break;
906
907 case VST_END:
908 /*
909 * Backing store pages for the END.
910 */
911 if (memory_region_is_mapped(&end_xsrc->esb_mmio)) {
912 memory_region_del_subregion(&xive->end_mmio, &end_xsrc->esb_mmio);
913 }
914 if (!(VSD_INDIRECT & vsd)) {
915 memory_region_set_size(&end_xsrc->esb_mmio, (vst_tsize / info->size)
916 * (1ull << end_xsrc->esb_shift));
917 }
918 memory_region_add_subregion(&xive->end_mmio, 0, &end_xsrc->esb_mmio);
919 break;
920
921 case VST_NVP: /* Not modeled */
922 case VST_NVG: /* Not modeled */
923 case VST_NVC: /* Not modeled */
924 case VST_IC: /* Not modeled */
925 case VST_SYNC: /* Not modeled */
926 case VST_ERQ: /* Not modeled */
927 break;
928
929 default:
930 g_assert_not_reached();
931 }
932 }
933
934 /*
935 * Both the PC and VC sub-engines are configured here, as each uses the
936 * Virtual Structure Tables.
937 */
938 static void pnv_xive2_vst_set_data(PnvXive2 *xive, uint64_t vsd,
939 uint8_t type, uint8_t blk)
940 {
941 uint8_t mode = GETFIELD(VSD_MODE, vsd);
942 uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
943
944 if (type > VST_ERQ) {
945 xive2_error(xive, "VST: invalid table type %d", type);
946 return;
947 }
948
949 if (blk >= vst_infos[type].max_blocks) {
950 xive2_error(xive, "VST: invalid block id %d for"
951 " %s table", blk, vst_infos[type].name);
952 return;
953 }
954
955 if (!vst_addr) {
956 xive2_error(xive, "VST: invalid %s table address",
957 vst_infos[type].name);
958 return;
959 }
960
961 switch (mode) {
962 case VSD_MODE_FORWARD:
963 xive->vsds[type][blk] = vsd;
964 break;
965
966 case VSD_MODE_EXCLUSIVE:
967 pnv_xive2_vst_set_exclusive(xive, type, blk, vsd);
968 break;
969
970 default:
971 xive2_error(xive, "VST: unsupported table mode %d", mode);
972 return;
973 }
974 }
975
976 static void pnv_xive2_vc_vst_set_data(PnvXive2 *xive, uint64_t vsd)
977 {
978 uint8_t type = GETFIELD(VC_VSD_TABLE_SELECT,
979 xive->vc_regs[VC_VSD_TABLE_ADDR >> 3]);
980 uint8_t blk = GETFIELD(VC_VSD_TABLE_ADDRESS,
981 xive->vc_regs[VC_VSD_TABLE_ADDR >> 3]);
982
983 pnv_xive2_vst_set_data(xive, vsd, type, blk);
984 }
985
986 /*
987 * MMIO handlers
988 */
989
990
991 /*
992 * IC BAR layout
993 *
994 * Page 0: Internal CQ register accesses (reads & writes)
995 * Page 1: Internal PC register accesses (reads & writes)
996 * Page 2: Internal VC register accesses (reads & writes)
997 * Page 3: Internal TCTXT (TIMA) reg accesses (read & writes)
998 * Page 4: Notify Port page (writes only, w/data),
999 * Page 5: Reserved
1000 * Page 6: Sync Poll page (writes only, dataless)
1001 * Page 7: Sync Inject page (writes only, dataless)
1002 * Page 8: LSI Trigger page (writes only, dataless)
1003 * Page 9: LSI SB Management page (reads & writes dataless)
1004 * Pages 10-255: Reserved
1005 * Pages 256-383: Direct mapped Thread Context Area (reads & writes)
1006 * covering the 128 threads in P10.
1007 * Pages 384-511: Reserved
1008 */
1009 typedef struct PnvXive2Region {
1010 const char *name;
1011 uint32_t pgoff;
1012 uint32_t pgsize;
1013 const MemoryRegionOps *ops;
1014 } PnvXive2Region;
1015
1016 static const MemoryRegionOps pnv_xive2_ic_cq_ops;
1017 static const MemoryRegionOps pnv_xive2_ic_pc_ops;
1018 static const MemoryRegionOps pnv_xive2_ic_vc_ops;
1019 static const MemoryRegionOps pnv_xive2_ic_tctxt_ops;
1020 static const MemoryRegionOps pnv_xive2_ic_notify_ops;
1021 static const MemoryRegionOps pnv_xive2_ic_sync_ops;
1022 static const MemoryRegionOps pnv_xive2_ic_lsi_ops;
1023 static const MemoryRegionOps pnv_xive2_ic_tm_indirect_ops;
1024
1025 /* 512 pages. 4K: 2M range, 64K: 32M range */
1026 static const PnvXive2Region pnv_xive2_ic_regions[] = {
1027 { "xive-ic-cq", 0, 1, &pnv_xive2_ic_cq_ops },
1028 { "xive-ic-vc", 1, 1, &pnv_xive2_ic_vc_ops },
1029 { "xive-ic-pc", 2, 1, &pnv_xive2_ic_pc_ops },
1030 { "xive-ic-tctxt", 3, 1, &pnv_xive2_ic_tctxt_ops },
1031 { "xive-ic-notify", 4, 1, &pnv_xive2_ic_notify_ops },
1032 /* page 5 reserved */
1033 { "xive-ic-sync", 6, 2, &pnv_xive2_ic_sync_ops },
1034 { "xive-ic-lsi", 8, 2, &pnv_xive2_ic_lsi_ops },
1035 /* pages 10-255 reserved */
1036 { "xive-ic-tm-indirect", 256, 128, &pnv_xive2_ic_tm_indirect_ops },
1037 /* pages 384-511 reserved */
1038 };
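/*
 * For example (illustrative), with 64K IC pages (ic_shift = 16) the
 * "xive-ic-tm-indirect" region starts at page 256, i.e. 16M into the
 * IC BAR, and spans 128 pages (8M), one page per hardware thread.
 */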
1039
1040 /*
1041 * CQ operations
1042 */
1043
1044 static uint64_t pnv_xive2_ic_cq_read(void *opaque, hwaddr offset,
1045 unsigned size)
1046 {
1047 PnvXive2 *xive = PNV_XIVE2(opaque);
1048 uint32_t reg = offset >> 3;
1049 uint64_t val = 0;
1050
1051 switch (offset) {
1052 case CQ_XIVE_CAP: /* Set at reset */
1053 case CQ_XIVE_CFG:
1054 val = xive->cq_regs[reg];
1055 break;
1056 case CQ_MSGSND: /* TODO check the #cores of the machine */
1057 val = 0xffffffff00000000;
1058 break;
1059 case CQ_CFG_PB_GEN:
1060 val = CQ_CFG_PB_GEN_PB_INIT; /* TODO: fix CQ_CFG_PB_GEN default value */
1061 break;
1062 default:
1063 xive2_error(xive, "CQ: invalid read @%"HWADDR_PRIx, offset);
1064 }
1065
1066 return val;
1067 }
1068
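/*
 * The BAR range field selects a power-of-two window of at least 16M;
 * e.g. CQ_BAR_RANGE = 2 gives a 64M BAR.
 */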
1069 static uint64_t pnv_xive2_bar_size(uint64_t val)
1070 {
1071 return 1ull << (GETFIELD(CQ_BAR_RANGE, val) + 24);
1072 }
1073
1074 static void pnv_xive2_ic_cq_write(void *opaque, hwaddr offset,
1075 uint64_t val, unsigned size)
1076 {
1077 PnvXive2 *xive = PNV_XIVE2(opaque);
1078 MemoryRegion *sysmem = get_system_memory();
1079 uint32_t reg = offset >> 3;
1080 int i;
1081
1082 switch (offset) {
1083 case CQ_XIVE_CFG:
1084 case CQ_RST_CTL: /* TODO: reset all BARs */
1085 break;
1086
1087 case CQ_IC_BAR:
1088 xive->ic_shift = val & CQ_IC_BAR_64K ? 16 : 12;
1089 if (!(val & CQ_IC_BAR_VALID)) {
1090 xive->ic_base = 0;
1091 if (xive->cq_regs[reg] & CQ_IC_BAR_VALID) {
1092 for (i = 0; i < ARRAY_SIZE(xive->ic_mmios); i++) {
1093 memory_region_del_subregion(&xive->ic_mmio,
1094 &xive->ic_mmios[i]);
1095 }
1096 memory_region_del_subregion(sysmem, &xive->ic_mmio);
1097 }
1098 } else {
1099 xive->ic_base = val & ~(CQ_IC_BAR_VALID | CQ_IC_BAR_64K);
1100 if (!(xive->cq_regs[reg] & CQ_IC_BAR_VALID)) {
1101 for (i = 0; i < ARRAY_SIZE(xive->ic_mmios); i++) {
1102 memory_region_add_subregion(&xive->ic_mmio,
1103 pnv_xive2_ic_regions[i].pgoff << xive->ic_shift,
1104 &xive->ic_mmios[i]);
1105 }
1106 memory_region_add_subregion(sysmem, xive->ic_base,
1107 &xive->ic_mmio);
1108 }
1109 }
1110 break;
1111
1112 case CQ_TM_BAR:
1113 xive->tm_shift = val & CQ_TM_BAR_64K ? 16 : 12;
1114 if (!(val & CQ_TM_BAR_VALID)) {
1115 xive->tm_base = 0;
1116 if (xive->cq_regs[reg] & CQ_TM_BAR_VALID) {
1117 memory_region_del_subregion(sysmem, &xive->tm_mmio);
1118 }
1119 } else {
1120 xive->tm_base = val & ~(CQ_TM_BAR_VALID | CQ_TM_BAR_64K);
1121 if (!(xive->cq_regs[reg] & CQ_TM_BAR_VALID)) {
1122 memory_region_add_subregion(sysmem, xive->tm_base,
1123 &xive->tm_mmio);
1124 }
1125 }
1126 break;
1127
1128 case CQ_ESB_BAR:
1129 xive->esb_shift = val & CQ_BAR_64K ? 16 : 12;
1130 if (!(val & CQ_BAR_VALID)) {
1131 xive->esb_base = 0;
1132 if (xive->cq_regs[reg] & CQ_BAR_VALID) {
1133 memory_region_del_subregion(sysmem, &xive->esb_mmio);
1134 }
1135 } else {
1136 xive->esb_base = val & CQ_BAR_ADDR;
1137 if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
1138 memory_region_set_size(&xive->esb_mmio,
1139 pnv_xive2_bar_size(val));
1140 memory_region_add_subregion(sysmem, xive->esb_base,
1141 &xive->esb_mmio);
1142 }
1143 }
1144 break;
1145
1146 case CQ_END_BAR:
1147 xive->end_shift = val & CQ_BAR_64K ? 16 : 12;
1148 if (!(val & CQ_BAR_VALID)) {
1149 xive->end_base = 0;
1150 if (xive->cq_regs[reg] & CQ_BAR_VALID) {
1151 memory_region_del_subregion(sysmem, &xive->end_mmio);
1152 }
1153 } else {
1154 xive->end_base = val & CQ_BAR_ADDR;
1155 if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
1156 memory_region_set_size(&xive->end_mmio,
1157 pnv_xive2_bar_size(val));
1158 memory_region_add_subregion(sysmem, xive->end_base,
1159 &xive->end_mmio);
1160 }
1161 }
1162 break;
1163
1164 case CQ_NVC_BAR:
1165 xive->nvc_shift = val & CQ_BAR_64K ? 16 : 12;
1166 if (!(val & CQ_BAR_VALID)) {
1167 xive->nvc_base = 0;
1168 if (xive->cq_regs[reg] & CQ_BAR_VALID) {
1169 memory_region_del_subregion(sysmem, &xive->nvc_mmio);
1170 }
1171 } else {
1172 xive->nvc_base = val & CQ_BAR_ADDR;
1173 if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
1174 memory_region_set_size(&xive->nvc_mmio,
1175 pnv_xive2_bar_size(val));
1176 memory_region_add_subregion(sysmem, xive->nvc_base,
1177 &xive->nvc_mmio);
1178 }
1179 }
1180 break;
1181
1182 case CQ_NVPG_BAR:
1183 xive->nvpg_shift = val & CQ_BAR_64K ? 16 : 12;
1184 if (!(val & CQ_BAR_VALID)) {
1185 xive->nvpg_base = 0;
1186 if (xive->cq_regs[reg] & CQ_BAR_VALID) {
1187 memory_region_del_subregion(sysmem, &xive->nvpg_mmio);
1188 }
1189 } else {
1190 xive->nvpg_base = val & CQ_BAR_ADDR;
1191 if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
1192 memory_region_set_size(&xive->nvpg_mmio,
1193 pnv_xive2_bar_size(val));
1194 memory_region_add_subregion(sysmem, xive->nvpg_base,
1195 &xive->nvpg_mmio);
1196 }
1197 }
1198 break;
1199
1200 case CQ_TAR: /* Set Translation Table Address */
1201 break;
1202 case CQ_TDR: /* Set Translation Table Data */
1203 pnv_xive2_stt_set_data(xive, val);
1204 break;
1205 case CQ_FIRMASK_OR: /* FIR error reporting */
1206 break;
1207 default:
1208 xive2_error(xive, "CQ: invalid write 0x%"HWADDR_PRIx" value 0x%"PRIx64,
1209 offset, val);
1210 return;
1211 }
1212
1213 xive->cq_regs[reg] = val;
1214 }
1215
1216 static const MemoryRegionOps pnv_xive2_ic_cq_ops = {
1217 .read = pnv_xive2_ic_cq_read,
1218 .write = pnv_xive2_ic_cq_write,
1219 .endianness = DEVICE_BIG_ENDIAN,
1220 .valid = {
1221 .min_access_size = 8,
1222 .max_access_size = 8,
1223 },
1224 .impl = {
1225 .min_access_size = 8,
1226 .max_access_size = 8,
1227 },
1228 };
1229
1230 static uint8_t pnv_xive2_cache_watch_assign(uint64_t engine_mask,
1231 uint64_t *state)
1232 {
1233 uint8_t val = 0xFF;
1234 int i;
1235
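    /*
     * The mask is scanned from bit 3 down to bit 0 and engines are
     * numbered 0..3 starting from the highest bit, hence the "3 - i"
     * conversion when reporting the assigned engine.
     */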
1236 for (i = 3; i >= 0; i--) {
1237 if (BIT(i) & engine_mask) {
1238 if (!(BIT(i) & *state)) {
1239 *state |= BIT(i);
1240 val = 3 - i;
1241 break;
1242 }
1243 }
1244 }
1245 return val;
1246 }
1247
1248 static void pnv_xive2_cache_watch_release(uint64_t *state, uint8_t watch_engine)
1249 {
1250 uint8_t engine_bit = 3 - watch_engine;
1251
1252 if (*state & BIT(engine_bit)) {
1253 *state &= ~BIT(engine_bit);
1254 }
1255 }
1256
1257 static uint8_t pnv_xive2_endc_cache_watch_assign(PnvXive2 *xive)
1258 {
1259 uint64_t engine_mask = GETFIELD(VC_ENDC_CFG_CACHE_WATCH_ASSIGN,
1260 xive->vc_regs[VC_ENDC_CFG >> 3]);
1261 uint64_t state = xive->vc_regs[VC_ENDC_WATCH_ASSIGN >> 3];
1262 uint8_t val;
1263
1264 /*
1265 * We keep track of which engines are currently busy in the
1266 * VC_ENDC_WATCH_ASSIGN register directly. When the firmware reads
1267 * the register, we don't return its value but the ID of an engine
1268 * it can use.
1269 * There are 4 engines. 0xFF means no engine is available.
1270 */
1271 val = pnv_xive2_cache_watch_assign(engine_mask, &state);
1272 if (val != 0xFF) {
1273 xive->vc_regs[VC_ENDC_WATCH_ASSIGN >> 3] = state;
1274 }
1275 return val;
1276 }
1277
1278 static void pnv_xive2_endc_cache_watch_release(PnvXive2 *xive,
1279 uint8_t watch_engine)
1280 {
1281 uint64_t state = xive->vc_regs[VC_ENDC_WATCH_ASSIGN >> 3];
1282
1283 pnv_xive2_cache_watch_release(&state, watch_engine);
1284 xive->vc_regs[VC_ENDC_WATCH_ASSIGN >> 3] = state;
1285 }
1286
1287 static uint64_t pnv_xive2_ic_vc_read(void *opaque, hwaddr offset,
1288 unsigned size)
1289 {
1290 PnvXive2 *xive = PNV_XIVE2(opaque);
1291 uint64_t val = 0;
1292 uint32_t reg = offset >> 3;
1293 uint8_t watch_engine;
1294
1295 switch (offset) {
1296 /*
1297 * VSD table settings.
1298 */
1299 case VC_VSD_TABLE_ADDR:
1300 case VC_VSD_TABLE_DATA:
1301 val = xive->vc_regs[reg];
1302 break;
1303
1304 /*
1305 * ESB cache updates (not modeled)
1306 */
1307 case VC_ESBC_FLUSH_CTRL:
1308 xive->vc_regs[reg] &= ~VC_ESBC_FLUSH_CTRL_POLL_VALID;
1309 val = xive->vc_regs[reg];
1310 break;
1311
1312 case VC_ESBC_CFG:
1313 val = xive->vc_regs[reg];
1314 break;
1315
1316 /*
1317 * EAS cache updates (not modeled)
1318 */
1319 case VC_EASC_FLUSH_CTRL:
1320 xive->vc_regs[reg] &= ~VC_EASC_FLUSH_CTRL_POLL_VALID;
1321 val = xive->vc_regs[reg];
1322 break;
1323
1324 case VC_ENDC_WATCH_ASSIGN:
1325 val = pnv_xive2_endc_cache_watch_assign(xive);
1326 break;
1327
1328 case VC_ENDC_CFG:
1329 val = xive->vc_regs[reg];
1330 break;
1331
1332 /*
1333 * END cache updates
1334 */
1335 case VC_ENDC_WATCH0_SPEC:
1336 case VC_ENDC_WATCH1_SPEC:
1337 case VC_ENDC_WATCH2_SPEC:
1338 case VC_ENDC_WATCH3_SPEC:
1339 watch_engine = (offset - VC_ENDC_WATCH0_SPEC) >> 6;
1340 pnv_xive2_endc_cache_watch_release(xive, watch_engine);
1341 val = xive->vc_regs[reg];
1342 break;
1343
1344 case VC_ENDC_WATCH0_DATA0:
1345 case VC_ENDC_WATCH1_DATA0:
1346 case VC_ENDC_WATCH2_DATA0:
1347 case VC_ENDC_WATCH3_DATA0:
1348 /*
1349 * Load DATA registers from cache with data requested by the
1350 * SPEC register. Clear gen_flipped bit in word 1.
1351 */
1352 watch_engine = (offset - VC_ENDC_WATCH0_DATA0) >> 6;
1353 pnv_xive2_end_cache_load(xive, watch_engine);
1354 xive->vc_regs[reg] &= ~(uint64_t)END2_W1_GEN_FLIPPED;
1355 val = xive->vc_regs[reg];
1356 break;
1357
1358 case VC_ENDC_WATCH0_DATA1 ... VC_ENDC_WATCH0_DATA3:
1359 case VC_ENDC_WATCH1_DATA1 ... VC_ENDC_WATCH1_DATA3:
1360 case VC_ENDC_WATCH2_DATA1 ... VC_ENDC_WATCH2_DATA3:
1361 case VC_ENDC_WATCH3_DATA1 ... VC_ENDC_WATCH3_DATA3:
1362 val = xive->vc_regs[reg];
1363 break;
1364
1365 case VC_ENDC_FLUSH_CTRL:
1366 xive->vc_regs[reg] &= ~VC_ENDC_FLUSH_CTRL_POLL_VALID;
1367 val = xive->vc_regs[reg];
1368 break;
1369
1370 /*
1371 * Indirect invalidation
1372 */
1373 case VC_AT_MACRO_KILL_MASK:
1374 val = xive->vc_regs[reg];
1375 break;
1376
1377 case VC_AT_MACRO_KILL:
1378 xive->vc_regs[reg] &= ~VC_AT_MACRO_KILL_VALID;
1379 val = xive->vc_regs[reg];
1380 break;
1381
1382 /*
1383 * Interrupt fifo overflow in memory backing store (Not modeled)
1384 */
1385 case VC_QUEUES_CFG_REM0 ... VC_QUEUES_CFG_REM6:
1386 val = xive->vc_regs[reg];
1387 break;
1388
1389 /*
1390 * Synchronisation
1391 */
1392 case VC_ENDC_SYNC_DONE:
1393 val = VC_ENDC_SYNC_POLL_DONE;
1394 break;
1395 default:
1396 xive2_error(xive, "VC: invalid read @%"HWADDR_PRIx, offset);
1397 }
1398
1399 return val;
1400 }
1401
1402 static void pnv_xive2_ic_vc_write(void *opaque, hwaddr offset,
1403 uint64_t val, unsigned size)
1404 {
1405 PnvXive2 *xive = PNV_XIVE2(opaque);
1406 uint32_t reg = offset >> 3;
1407 uint8_t watch_engine;
1408
1409 switch (offset) {
1410 /*
1411 * VSD table settings.
1412 */
1413 case VC_VSD_TABLE_ADDR:
1414 break;
1415 case VC_VSD_TABLE_DATA:
1416 pnv_xive2_vc_vst_set_data(xive, val);
1417 break;
1418
1419 /*
1420 * ESB cache updates (not modeled)
1421 */
1422 case VC_ESBC_FLUSH_CTRL:
1423 if (val & VC_ESBC_FLUSH_CTRL_WANT_CACHE_DISABLE) {
1424 xive2_error(xive, "VC: unsupported write @0x%"HWADDR_PRIx
1425 " value 0x%"PRIx64" bit[2] poll_want_cache_disable",
1426 offset, val);
1427 return;
1428 }
1429 break;
1430 case VC_ESBC_FLUSH_POLL:
1431 xive->vc_regs[VC_ESBC_FLUSH_CTRL >> 3] |= VC_ESBC_FLUSH_CTRL_POLL_VALID;
1432 /* ESB update */
1433 break;
1434
1435 case VC_ESBC_FLUSH_INJECT:
1436 pnv_xive2_inject_notify(xive, PNV_XIVE2_CACHE_ESBC);
1437 break;
1438
1439 case VC_ESBC_CFG:
1440 break;
1441
1442 /*
1443 * EAS cache updates (not modeled)
1444 */
1445 case VC_EASC_FLUSH_CTRL:
1446 if (val & VC_EASC_FLUSH_CTRL_WANT_CACHE_DISABLE) {
1447 xive2_error(xive, "VC: unsupported write @0x%"HWADDR_PRIx
1448 " value 0x%"PRIx64" bit[2] poll_want_cache_disable",
1449 offset, val);
1450 return;
1451 }
1452 break;
1453 case VC_EASC_FLUSH_POLL:
1454 xive->vc_regs[VC_EASC_FLUSH_CTRL >> 3] |= VC_EASC_FLUSH_CTRL_POLL_VALID;
1455 /* EAS update */
1456 break;
1457
1458 case VC_EASC_FLUSH_INJECT:
1459 pnv_xive2_inject_notify(xive, PNV_XIVE2_CACHE_EASC);
1460 break;
1461
1462 case VC_ENDC_CFG:
1463 break;
1464
1465 /*
1466 * END cache updates
1467 */
1468 case VC_ENDC_WATCH0_SPEC:
1469 case VC_ENDC_WATCH1_SPEC:
1470 case VC_ENDC_WATCH2_SPEC:
1471 case VC_ENDC_WATCH3_SPEC:
1472 val &= ~VC_ENDC_WATCH_CONFLICT; /* HW will set this bit */
1473 break;
1474
1475 case VC_ENDC_WATCH0_DATA1 ... VC_ENDC_WATCH0_DATA3:
1476 case VC_ENDC_WATCH1_DATA1 ... VC_ENDC_WATCH1_DATA3:
1477 case VC_ENDC_WATCH2_DATA1 ... VC_ENDC_WATCH2_DATA3:
1478 case VC_ENDC_WATCH3_DATA1 ... VC_ENDC_WATCH3_DATA3:
1479 break;
1480 case VC_ENDC_WATCH0_DATA0:
1481 case VC_ENDC_WATCH1_DATA0:
1482 case VC_ENDC_WATCH2_DATA0:
1483 case VC_ENDC_WATCH3_DATA0:
1484 /* writing to DATA0 triggers the cache write */
1485 watch_engine = (offset - VC_ENDC_WATCH0_DATA0) >> 6;
1486 xive->vc_regs[reg] = val;
1487 pnv_xive2_end_update(xive, watch_engine);
1488 break;
1489
1490
1491 case VC_ENDC_FLUSH_CTRL:
1492 if (val & VC_ENDC_FLUSH_CTRL_WANT_CACHE_DISABLE) {
1493 xive2_error(xive, "VC: unsupported write @0x%"HWADDR_PRIx
1494 " value 0x%"PRIx64" bit[2] poll_want_cache_disable",
1495 offset, val);
1496 return;
1497 }
1498 break;
1499 case VC_ENDC_FLUSH_POLL:
1500 xive->vc_regs[VC_ENDC_FLUSH_CTRL >> 3] |= VC_ENDC_FLUSH_CTRL_POLL_VALID;
1501 break;
1502
1503 case VC_ENDC_FLUSH_INJECT:
1504 pnv_xive2_inject_notify(xive, PNV_XIVE2_CACHE_ENDC);
1505 break;
1506
1507 /*
1508 * Indirect invalidation
1509 */
1510 case VC_AT_MACRO_KILL:
1511 case VC_AT_MACRO_KILL_MASK:
1512 break;
1513
1514 /*
1515 * Interrupt fifo overflow in memory backing store (Not modeled)
1516 */
1517 case VC_QUEUES_CFG_REM0 ... VC_QUEUES_CFG_REM6:
1518 break;
1519
1520 /*
1521 * Synchronisation
1522 */
1523 case VC_ENDC_SYNC_DONE:
1524 break;
1525
1526 default:
1527 xive2_error(xive, "VC: invalid write @0x%"HWADDR_PRIx" value 0x%"PRIx64,
1528 offset, val);
1529 return;
1530 }
1531
1532 xive->vc_regs[reg] = val;
1533 }
1534
1535 static const MemoryRegionOps pnv_xive2_ic_vc_ops = {
1536 .read = pnv_xive2_ic_vc_read,
1537 .write = pnv_xive2_ic_vc_write,
1538 .endianness = DEVICE_BIG_ENDIAN,
1539 .valid = {
1540 .min_access_size = 8,
1541 .max_access_size = 8,
1542 },
1543 .impl = {
1544 .min_access_size = 8,
1545 .max_access_size = 8,
1546 },
1547 };
1548
1549 static uint8_t pnv_xive2_nxc_cache_watch_assign(PnvXive2 *xive)
1550 {
1551 uint64_t engine_mask = GETFIELD(PC_NXC_PROC_CONFIG_WATCH_ASSIGN,
1552 xive->pc_regs[PC_NXC_PROC_CONFIG >> 3]);
1553 uint64_t state = xive->pc_regs[PC_NXC_WATCH_ASSIGN >> 3];
1554 uint8_t val;
1555
1556 /*
1557 * We keep track of which engines are currently busy in the
1558 * PC_NXC_WATCH_ASSIGN register directly. When the firmware reads
1559 * the register, we don't return its value but the ID of an engine
1560 * it can use.
1561 * There are 4 engines. 0xFF means no engine is available.
1562 */
1563 val = pnv_xive2_cache_watch_assign(engine_mask, &state);
1564 if (val != 0xFF) {
1565 xive->pc_regs[PC_NXC_WATCH_ASSIGN >> 3] = state;
1566 }
1567 return val;
1568 }
1569
1570 static void pnv_xive2_nxc_cache_watch_release(PnvXive2 *xive,
1571 uint8_t watch_engine)
1572 {
1573 uint64_t state = xive->pc_regs[PC_NXC_WATCH_ASSIGN >> 3];
1574
1575 pnv_xive2_cache_watch_release(&state, watch_engine);
1576 xive->pc_regs[PC_NXC_WATCH_ASSIGN >> 3] = state;
1577 }
1578
1579 static uint64_t pnv_xive2_ic_pc_read(void *opaque, hwaddr offset,
1580 unsigned size)
1581 {
1582 PnvXive2 *xive = PNV_XIVE2(opaque);
1583 uint64_t val = -1;
1584 uint32_t reg = offset >> 3;
1585 uint8_t watch_engine;
1586
1587 switch (offset) {
1588 /*
1589 * VSD table settings.
1590 */
1591 case PC_VSD_TABLE_ADDR:
1592 case PC_VSD_TABLE_DATA:
1593 val = xive->pc_regs[reg];
1594 break;
1595
1596 case PC_NXC_WATCH_ASSIGN:
1597 val = pnv_xive2_nxc_cache_watch_assign(xive);
1598 break;
1599
1600 case PC_NXC_PROC_CONFIG:
1601 val = xive->pc_regs[reg];
1602 break;
1603
1604 /*
1605 * cache updates
1606 */
1607 case PC_NXC_WATCH0_SPEC:
1608 case PC_NXC_WATCH1_SPEC:
1609 case PC_NXC_WATCH2_SPEC:
1610 case PC_NXC_WATCH3_SPEC:
1611 watch_engine = (offset - PC_NXC_WATCH0_SPEC) >> 6;
1612 xive->pc_regs[reg] &= ~(PC_NXC_WATCH_FULL | PC_NXC_WATCH_CONFLICT);
1613 pnv_xive2_nxc_cache_watch_release(xive, watch_engine);
1614 val = xive->pc_regs[reg];
1615 break;
1616
1617 case PC_NXC_WATCH0_DATA0:
1618 case PC_NXC_WATCH1_DATA0:
1619 case PC_NXC_WATCH2_DATA0:
1620 case PC_NXC_WATCH3_DATA0:
1621 /*
1622 * Load DATA registers from cache with data requested by the
1623 * SPEC register
1624 */
1625 watch_engine = (offset - PC_NXC_WATCH0_DATA0) >> 6;
1626 pnv_xive2_nxc_cache_load(xive, watch_engine);
1627 val = xive->pc_regs[reg];
1628 break;
1629
1630 case PC_NXC_WATCH0_DATA1 ... PC_NXC_WATCH0_DATA3:
1631 case PC_NXC_WATCH1_DATA1 ... PC_NXC_WATCH1_DATA3:
1632 case PC_NXC_WATCH2_DATA1 ... PC_NXC_WATCH2_DATA3:
1633 case PC_NXC_WATCH3_DATA1 ... PC_NXC_WATCH3_DATA3:
1634 val = xive->pc_regs[reg];
1635 break;
1636
1637 case PC_NXC_FLUSH_CTRL:
1638 xive->pc_regs[reg] &= ~PC_NXC_FLUSH_CTRL_POLL_VALID;
1639 val = xive->pc_regs[reg];
1640 break;
1641
1642 /*
1643 * Indirect invalidation
1644 */
1645 case PC_AT_KILL:
1646 xive->pc_regs[reg] &= ~PC_AT_KILL_VALID;
1647 val = xive->pc_regs[reg];
1648 break;
1649
1650 default:
1651 xive2_error(xive, "PC: invalid read @%"HWADDR_PRIx, offset);
1652 }
1653
1654 return val;
1655 }
1656
1657 static void pnv_xive2_pc_vst_set_data(PnvXive2 *xive, uint64_t vsd)
1658 {
1659 uint8_t type = GETFIELD(PC_VSD_TABLE_SELECT,
1660 xive->pc_regs[PC_VSD_TABLE_ADDR >> 3]);
1661 uint8_t blk = GETFIELD(PC_VSD_TABLE_ADDRESS,
1662 xive->pc_regs[PC_VSD_TABLE_ADDR >> 3]);
1663
1664 pnv_xive2_vst_set_data(xive, vsd, type, blk);
1665 }
1666
1667 static void pnv_xive2_ic_pc_write(void *opaque, hwaddr offset,
1668 uint64_t val, unsigned size)
1669 {
1670 PnvXive2 *xive = PNV_XIVE2(opaque);
1671 uint32_t reg = offset >> 3;
1672 uint8_t watch_engine;
1673
1674 switch (offset) {
1675
1676 /*
1677 * VSD table settings.
1678 * The Xive2Router model combines both VC and PC sub-engines. We
1679 * allow configuring the tables through both, for the rare cases
1680 * where a table only really needs to be configured for one of
1681 * them (e.g. the NVG table for the presenter). It assumes that
1682 * firmware passes the same address to the VC and PC when tables
1683 * are defined for both, which seems acceptable.
1684 */
1685 case PC_VSD_TABLE_ADDR:
1686 break;
1687 case PC_VSD_TABLE_DATA:
1688 pnv_xive2_pc_vst_set_data(xive, val);
1689 break;
1690
1691 case PC_NXC_PROC_CONFIG:
1692 break;
1693
1694 /*
1695 * cache updates
1696 */
1697 case PC_NXC_WATCH0_SPEC:
1698 case PC_NXC_WATCH1_SPEC:
1699 case PC_NXC_WATCH2_SPEC:
1700 case PC_NXC_WATCH3_SPEC:
1701 val &= ~PC_NXC_WATCH_CONFLICT; /* HW will set this bit */
1702 break;
1703
1704 case PC_NXC_WATCH0_DATA1 ... PC_NXC_WATCH0_DATA3:
1705 case PC_NXC_WATCH1_DATA1 ... PC_NXC_WATCH1_DATA3:
1706 case PC_NXC_WATCH2_DATA1 ... PC_NXC_WATCH2_DATA3:
1707 case PC_NXC_WATCH3_DATA1 ... PC_NXC_WATCH3_DATA3:
1708 break;
1709 case PC_NXC_WATCH0_DATA0:
1710 case PC_NXC_WATCH1_DATA0:
1711 case PC_NXC_WATCH2_DATA0:
1712 case PC_NXC_WATCH3_DATA0:
1713 /* writing to DATA0 triggers the cache write */
1714 watch_engine = (offset - PC_NXC_WATCH0_DATA0) >> 6;
1715 xive->pc_regs[reg] = val;
1716 pnv_xive2_nxc_update(xive, watch_engine);
1717 break;
1718
1719 case PC_NXC_FLUSH_CTRL:
1720 if (val & PC_NXC_FLUSH_CTRL_WANT_CACHE_DISABLE) {
1721 xive2_error(xive, "VC: unsupported write @0x%"HWADDR_PRIx
1722 " value 0x%"PRIx64" bit[2] poll_want_cache_disable",
1723 offset, val);
1724 return;
1725 }
1726 break;
1727 case PC_NXC_FLUSH_POLL:
1728 xive->pc_regs[PC_NXC_FLUSH_CTRL >> 3] |= PC_NXC_FLUSH_CTRL_POLL_VALID;
1729 break;
1730
1731 case PC_NXC_FLUSH_INJECT:
1732 pnv_xive2_inject_notify(xive, PNV_XIVE2_CACHE_NXC);
1733 break;
1734
1735 /*
1736 * Indirect invalidation
1737 */
1738 case PC_AT_KILL:
1739 case PC_AT_KILL_MASK:
1740 break;
1741
1742 default:
1743 xive2_error(xive, "PC: invalid write @0x%"HWADDR_PRIx" value 0x%"PRIx64,
1744 offset, val);
1745 return;
1746 }
1747
1748 xive->pc_regs[reg] = val;
1749 }
1750
1751 static const MemoryRegionOps pnv_xive2_ic_pc_ops = {
1752 .read = pnv_xive2_ic_pc_read,
1753 .write = pnv_xive2_ic_pc_write,
1754 .endianness = DEVICE_BIG_ENDIAN,
1755 .valid = {
1756 .min_access_size = 8,
1757 .max_access_size = 8,
1758 },
1759 .impl = {
1760 .min_access_size = 8,
1761 .max_access_size = 8,
1762 },
1763 };
1764
1765
1766 static uint64_t pnv_xive2_ic_tctxt_read(void *opaque, hwaddr offset,
1767 unsigned size)
1768 {
1769 PnvXive2 *xive = PNV_XIVE2(opaque);
1770 uint64_t val = -1;
1771 uint32_t reg = offset >> 3;
1772
1773 switch (offset) {
1774 /*
1775 * XIVE2 hardware thread enablement
1776 */
1777 case TCTXT_EN0:
1778 case TCTXT_EN1:
1779 val = xive->tctxt_regs[reg];
1780 break;
1781
1782 case TCTXT_EN0_SET:
1783 case TCTXT_EN0_RESET:
1784 val = xive->tctxt_regs[TCTXT_EN0 >> 3];
1785 break;
1786 case TCTXT_EN1_SET:
1787 case TCTXT_EN1_RESET:
1788 val = xive->tctxt_regs[TCTXT_EN1 >> 3];
1789 break;
1790 case TCTXT_CFG:
1791 val = xive->tctxt_regs[reg];
1792 break;
1793 default:
1794 xive2_error(xive, "TCTXT: invalid read @%"HWADDR_PRIx, offset);
1795 }
1796
1797 return val;
1798 }
1799
1800 static void pnv_xive2_ic_tctxt_write(void *opaque, hwaddr offset,
1801 uint64_t val, unsigned size)
1802 {
1803 PnvXive2 *xive = PNV_XIVE2(opaque);
1804 uint32_t reg = offset >> 3;
1805
1806 switch (offset) {
1807 /*
1808 * XIVE2 hardware thread enablement
1809 */
1810 case TCTXT_EN0: /* Physical Thread Enable */
1811 case TCTXT_EN1: /* Physical Thread Enable (fused core) */
1812 xive->tctxt_regs[reg] = val;
1813 break;
1814
1815 case TCTXT_EN0_SET:
1816 xive->tctxt_regs[TCTXT_EN0 >> 3] |= val;
1817 break;
1818 case TCTXT_EN1_SET:
1819 xive->tctxt_regs[TCTXT_EN1 >> 3] |= val;
1820 break;
1821 case TCTXT_EN0_RESET:
1822 xive->tctxt_regs[TCTXT_EN0 >> 3] &= ~val;
1823 break;
1824 case TCTXT_EN1_RESET:
1825 xive->tctxt_regs[TCTXT_EN1 >> 3] &= ~val;
1826 break;
1827 case TCTXT_CFG:
1828 xive->tctxt_regs[reg] = val;
1829 break;
1830 default:
1831 xive2_error(xive, "TCTXT: invalid write @0x%"HWADDR_PRIx
1832 " data 0x%"PRIx64, offset, val);
1833 return;
1834 }
1835 }
1836
1837 static const MemoryRegionOps pnv_xive2_ic_tctxt_ops = {
1838 .read = pnv_xive2_ic_tctxt_read,
1839 .write = pnv_xive2_ic_tctxt_write,
1840 .endianness = DEVICE_BIG_ENDIAN,
1841 .valid = {
1842 .min_access_size = 8,
1843 .max_access_size = 8,
1844 },
1845 .impl = {
1846 .min_access_size = 8,
1847 .max_access_size = 8,
1848 },
1849 };
1850
1851 /*
1852 * Redirect XSCOM to MMIO handlers
1853 */
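/*
 * Each 256-register XSCOM range maps onto one IC page: the low byte of
 * the XSCOM register number, shifted back into a byte offset, selects
 * the register within that page. For example, XSCOM register 0x1A3
 * would be forwarded to the VC handler at MMIO offset 0xA3 << 3.
 */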
1854 static uint64_t pnv_xive2_xscom_read(void *opaque, hwaddr offset,
1855 unsigned size)
1856 {
1857 PnvXive2 *xive = PNV_XIVE2(opaque);
1858 uint64_t val = -1;
1859 uint32_t xscom_reg = offset >> 3;
1860 uint32_t mmio_offset = (xscom_reg & 0xFF) << 3;
1861
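    /*
     * Each 0x100-register XSCOM range maps onto one of the IC MMIO
     * regions below; e.g. XSCOM register 0x1A3 falls in the VC range
     * and is forwarded to VC MMIO offset (0xA3 << 3) = 0x518.
     */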
1862 switch (xscom_reg) {
1863 case 0x000 ... 0x0FF:
1864 val = pnv_xive2_ic_cq_read(opaque, mmio_offset, size);
1865 break;
1866 case 0x100 ... 0x1FF:
1867 val = pnv_xive2_ic_vc_read(opaque, mmio_offset, size);
1868 break;
1869 case 0x200 ... 0x2FF:
1870 val = pnv_xive2_ic_pc_read(opaque, mmio_offset, size);
1871 break;
1872 case 0x300 ... 0x3FF:
1873 val = pnv_xive2_ic_tctxt_read(opaque, mmio_offset, size);
1874 break;
1875 default:
1876 xive2_error(xive, "XSCOM: invalid read @%"HWADDR_PRIx, offset);
1877 }
1878
1879 return val;
1880 }
1881
1882 static void pnv_xive2_xscom_write(void *opaque, hwaddr offset,
1883 uint64_t val, unsigned size)
1884 {
1885 PnvXive2 *xive = PNV_XIVE2(opaque);
1886 uint32_t xscom_reg = offset >> 3;
1887 uint32_t mmio_offset = (xscom_reg & 0xFF) << 3;
1888
1889 switch (xscom_reg) {
1890 case 0x000 ... 0x0FF:
1891 pnv_xive2_ic_cq_write(opaque, mmio_offset, val, size);
1892 break;
1893 case 0x100 ... 0x1FF:
1894 pnv_xive2_ic_vc_write(opaque, mmio_offset, val, size);
1895 break;
1896 case 0x200 ... 0x2FF:
1897 pnv_xive2_ic_pc_write(opaque, mmio_offset, val, size);
1898 break;
1899 case 0x300 ... 0x3FF:
1900 pnv_xive2_ic_tctxt_write(opaque, mmio_offset, val, size);
1901 break;
1902 default:
1903 xive2_error(xive, "XSCOM: invalid write @%"HWADDR_PRIx
1904 " value 0x%"PRIx64, offset, val);
1905 }
1906 }
1907
1908 static const MemoryRegionOps pnv_xive2_xscom_ops = {
1909 .read = pnv_xive2_xscom_read,
1910 .write = pnv_xive2_xscom_write,
1911 .endianness = DEVICE_BIG_ENDIAN,
1912 .valid = {
1913 .min_access_size = 8,
1914 .max_access_size = 8,
1915 },
1916 .impl = {
1917 .min_access_size = 8,
1918 .max_access_size = 8,
1919 },
1920 };
1921
1922 /*
1923  * Notify port page. The layout is compatible between 4K and 64K pages:
1924 *
1925 * Page 1 Notify page (writes only)
1926 * 0x000 - 0x7FF IPI interrupt (NPU)
1927 * 0x800 - 0xFFF HW interrupt triggers (PSI, PHB)
1928 */
1929
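/*
 * The 64-bit value stored on the notify page identifies the source event:
 * it carries the EAS block/index of the interrupt plus the XIVE_TRIGGER_PQ
 * and XIVE_TRIGGER_END flags, decoded here before the event is forwarded
 * to the router.
 */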
1930 static void pnv_xive2_ic_hw_trigger(PnvXive2 *xive, hwaddr addr,
1931 uint64_t val)
1932 {
1933 uint8_t blk;
1934 uint32_t idx;
1935
1936 if (val & XIVE_TRIGGER_END) {
1937 xive2_error(xive, "IC: END trigger at @0x%"HWADDR_PRIx" data 0x%"PRIx64,
1938 addr, val);
1939 return;
1940 }
1941
1942 /*
1943 * Forward the source event notification directly to the Router.
1944 * The source interrupt number should already be correctly encoded
1945 * with the chip block id by the sending device (PHB, PSI).
1946 */
1947 blk = XIVE_EAS_BLOCK(val);
1948 idx = XIVE_EAS_INDEX(val);
1949
1950 xive2_router_notify(XIVE_NOTIFIER(xive), XIVE_EAS(blk, idx),
1951 !!(val & XIVE_TRIGGER_PQ));
1952 }
1953
1954 static void pnv_xive2_ic_notify_write(void *opaque, hwaddr offset,
1955 uint64_t val, unsigned size)
1956 {
1957 PnvXive2 *xive = PNV_XIVE2(opaque);
1958
1959 /* VC: IPI triggers */
1960 switch (offset) {
1961 case 0x000 ... 0x7FF:
1962 /* TODO: check IPI notify sub-page routing */
1963 pnv_xive2_ic_hw_trigger(opaque, offset, val);
1964 break;
1965
1966 /* VC: HW triggers */
1967 case 0x800 ... 0xFFF:
1968 pnv_xive2_ic_hw_trigger(opaque, offset, val);
1969 break;
1970
1971 default:
1972 xive2_error(xive, "NOTIFY: invalid write @%"HWADDR_PRIx
1973 " value 0x%"PRIx64, offset, val);
1974 }
1975 }
1976
1977 static uint64_t pnv_xive2_ic_notify_read(void *opaque, hwaddr offset,
1978 unsigned size)
1979 {
1980 PnvXive2 *xive = PNV_XIVE2(opaque);
1981
1982 /* loads are invalid */
1983 xive2_error(xive, "NOTIFY: invalid read @%"HWADDR_PRIx, offset);
1984 return -1;
1985 }
1986
1987 static const MemoryRegionOps pnv_xive2_ic_notify_ops = {
1988 .read = pnv_xive2_ic_notify_read,
1989 .write = pnv_xive2_ic_notify_write,
1990 .endianness = DEVICE_BIG_ENDIAN,
1991 .valid = {
1992 .min_access_size = 8,
1993 .max_access_size = 8,
1994 },
1995 .impl = {
1996 .min_access_size = 8,
1997 .max_access_size = 8,
1998 },
1999 };
2000
2001 static uint64_t pnv_xive2_ic_lsi_read(void *opaque, hwaddr offset,
2002 unsigned size)
2003 {
2004 PnvXive2 *xive = PNV_XIVE2(opaque);
2005
2006 xive2_error(xive, "LSI: invalid read @%"HWADDR_PRIx, offset);
2007 return -1;
2008 }
2009
2010 static void pnv_xive2_ic_lsi_write(void *opaque, hwaddr offset,
2011 uint64_t val, unsigned size)
2012 {
2013 PnvXive2 *xive = PNV_XIVE2(opaque);
2014
2015 xive2_error(xive, "LSI: invalid write @%"HWADDR_PRIx" value 0x%"PRIx64,
2016 offset, val);
2017 }
2018
2019 static const MemoryRegionOps pnv_xive2_ic_lsi_ops = {
2020 .read = pnv_xive2_ic_lsi_read,
2021 .write = pnv_xive2_ic_lsi_write,
2022 .endianness = DEVICE_BIG_ENDIAN,
2023 .valid = {
2024 .min_access_size = 8,
2025 .max_access_size = 8,
2026 },
2027 .impl = {
2028 .min_access_size = 8,
2029 .max_access_size = 8,
2030 },
2031 };
2032
2033 /*
2034 * Sync MMIO page (write only)
2035 */
2036 #define PNV_XIVE2_SYNC_IPI 0x000
2037 #define PNV_XIVE2_SYNC_HW 0x080
2038 #define PNV_XIVE2_SYNC_NxC 0x100
2039 #define PNV_XIVE2_SYNC_INT 0x180
2040 #define PNV_XIVE2_SYNC_OS_ESC 0x200
2041 #define PNV_XIVE2_SYNC_POOL_ESC 0x280
2042 #define PNV_XIVE2_SYNC_HARD_ESC 0x300
2043 #define PNV_XIVE2_SYNC_NXC_LD_LCL_NCO 0x800
2044 #define PNV_XIVE2_SYNC_NXC_LD_LCL_CO 0x880
2045 #define PNV_XIVE2_SYNC_NXC_ST_LCL_NCI 0x900
2046 #define PNV_XIVE2_SYNC_NXC_ST_LCL_CI 0x980
2047 #define PNV_XIVE2_SYNC_NXC_ST_RMT_NCI 0xA00
2048 #define PNV_XIVE2_SYNC_NXC_ST_RMT_CI 0xA80
2049
2050 static uint64_t pnv_xive2_ic_sync_read(void *opaque, hwaddr offset,
2051 unsigned size)
2052 {
2053 PnvXive2 *xive = PNV_XIVE2(opaque);
2054
2055 /* loads are invalid */
2056 xive2_error(xive, "SYNC: invalid read @%"HWADDR_PRIx, offset);
2057 return -1;
2058 }
2059
2060 /*
2061  * The sync MMIO space spans two pages. The lower page is used for
2062 * queue sync "poll" requests while the upper page is used for queue
2063 * sync "inject" requests. Inject requests require the HW to write
2064 * a byte of all 1's to a predetermined location in memory in order
2065 * to signal completion of the request. Both pages have the same
2066 * layout, so it is easiest to handle both with a single function.
2067 */
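/*
 * Worked example, assuming the default 64k IC pages (ic_shift == 16):
 * a store at offset 0x10080 has adj_offset 0x080 (PNV_XIVE2_SYNC_HW) and
 * lands in the upper (inject) page, so the HW queue sync notification
 * byte is written; the same store at offset 0x00080 is only a poll request.
 */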
2068 static void pnv_xive2_ic_sync_write(void *opaque, hwaddr offset,
2069 uint64_t val, unsigned size)
2070 {
2071 PnvXive2 *xive = PNV_XIVE2(opaque);
2072 int inject_type;
2073 hwaddr pg_offset_mask = (1ull << xive->ic_shift) - 1;
2074
2075 /* adjust offset for inject page */
2076 hwaddr adj_offset = offset & pg_offset_mask;
2077
2078 switch (adj_offset) {
2079 case PNV_XIVE2_SYNC_IPI:
2080 inject_type = PNV_XIVE2_QUEUE_IPI;
2081 break;
2082 case PNV_XIVE2_SYNC_HW:
2083 inject_type = PNV_XIVE2_QUEUE_HW;
2084 break;
2085 case PNV_XIVE2_SYNC_NxC:
2086 inject_type = PNV_XIVE2_QUEUE_NXC;
2087 break;
2088 case PNV_XIVE2_SYNC_INT:
2089 inject_type = PNV_XIVE2_QUEUE_INT;
2090 break;
2091 case PNV_XIVE2_SYNC_OS_ESC:
2092 inject_type = PNV_XIVE2_QUEUE_OS;
2093 break;
2094 case PNV_XIVE2_SYNC_POOL_ESC:
2095 inject_type = PNV_XIVE2_QUEUE_POOL;
2096 break;
2097 case PNV_XIVE2_SYNC_HARD_ESC:
2098 inject_type = PNV_XIVE2_QUEUE_HARD;
2099 break;
2100 case PNV_XIVE2_SYNC_NXC_LD_LCL_NCO:
2101 inject_type = PNV_XIVE2_QUEUE_NXC_LD_LCL_NCO;
2102 break;
2103 case PNV_XIVE2_SYNC_NXC_LD_LCL_CO:
2104 inject_type = PNV_XIVE2_QUEUE_NXC_LD_LCL_CO;
2105 break;
2106 case PNV_XIVE2_SYNC_NXC_ST_LCL_NCI:
2107 inject_type = PNV_XIVE2_QUEUE_NXC_ST_LCL_NCI;
2108 break;
2109 case PNV_XIVE2_SYNC_NXC_ST_LCL_CI:
2110 inject_type = PNV_XIVE2_QUEUE_NXC_ST_LCL_CI;
2111 break;
2112 case PNV_XIVE2_SYNC_NXC_ST_RMT_NCI:
2113 inject_type = PNV_XIVE2_QUEUE_NXC_ST_RMT_NCI;
2114 break;
2115 case PNV_XIVE2_SYNC_NXC_ST_RMT_CI:
2116 inject_type = PNV_XIVE2_QUEUE_NXC_ST_RMT_CI;
2117 break;
2118 default:
2119 xive2_error(xive, "SYNC: invalid write @%"HWADDR_PRIx" value 0x%"PRIx64,
2120 offset, val);
2121 return;
2122 }
2123
2124 /* Write Queue Sync notification byte if writing to sync inject page */
2125 if ((offset & ~pg_offset_mask) != 0) {
2126 pnv_xive2_inject_notify(xive, inject_type);
2127 }
2128 }
2129
2130 static const MemoryRegionOps pnv_xive2_ic_sync_ops = {
2131 .read = pnv_xive2_ic_sync_read,
2132 .write = pnv_xive2_ic_sync_write,
2133 .endianness = DEVICE_BIG_ENDIAN,
2134 .valid = {
2135 .min_access_size = 8,
2136 .max_access_size = 8,
2137 },
2138 .impl = {
2139 .min_access_size = 8,
2140 .max_access_size = 8,
2141 },
2142 };
2143
2144 /*
2145 * When the TM direct pages of the IC controller are accessed, the
2146 * target HW thread is deduced from the page offset.
2147 */
2148 static uint32_t pnv_xive2_ic_tm_get_pir(PnvXive2 *xive, hwaddr offset)
2149 {
2150 /* On P10, the node ID shift in the PIR register is 8 bits */
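    /*
     * e.g. with the default 64k IC pages (ic_shift == 16), an access to
     * thread page 5 on chip 1 yields PIR (1 << 8) | 5 = 0x105.
     */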
2151 return xive->chip->chip_id << 8 | offset >> xive->ic_shift;
2152 }
2153
2154 static uint32_t pnv_xive2_ic_tm_get_hw_page_offset(PnvXive2 *xive,
2155 hwaddr offset)
2156 {
2157 /*
2158 * Indirect TIMA accesses are similar to direct accesses for
2159 * privilege ring 0. So remove any traces of the hw thread ID from
2160 * the offset in the IC BAR as it could be interpreted as the ring
2161 * privilege when calling the underlying direct access functions.
2162 */
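    /*
     * e.g. with 64k pages, offset 0x50123 (thread page 5) maps to
     * TIMA offset 0x123.
     */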
2163 return offset & ((1ull << xive->ic_shift) - 1);
2164 }
2165
2166 static XiveTCTX *pnv_xive2_get_indirect_tctx(PnvXive2 *xive, uint32_t pir)
2167 {
2168 PnvChip *chip = xive->chip;
2169 PowerPCCPU *cpu = NULL;
2170
2171 cpu = pnv_chip_find_cpu(chip, pir);
2172 if (!cpu) {
2173 xive2_error(xive, "IC: invalid PIR %x for indirect access", pir);
2174 return NULL;
2175 }
2176
2177 if (!pnv_xive2_is_cpu_enabled(xive, cpu)) {
2178 xive2_error(xive, "IC: CPU %x is not enabled", pir);
2179 }
2180
2181 return XIVE_TCTX(pnv_cpu_state(cpu)->intc);
2182 }
2183
2184 static uint64_t pnv_xive2_ic_tm_indirect_read(void *opaque, hwaddr offset,
2185 unsigned size)
2186 {
2187 PnvXive2 *xive = PNV_XIVE2(opaque);
2188 XivePresenter *xptr = XIVE_PRESENTER(xive);
2189 hwaddr hw_page_offset;
2190 uint32_t pir;
2191 XiveTCTX *tctx;
2192 uint64_t val = -1;
2193
2194 pir = pnv_xive2_ic_tm_get_pir(xive, offset);
2195 hw_page_offset = pnv_xive2_ic_tm_get_hw_page_offset(xive, offset);
2196 tctx = pnv_xive2_get_indirect_tctx(xive, pir);
2197 if (tctx) {
2198 val = xive_tctx_tm_read(xptr, tctx, hw_page_offset, size);
2199 }
2200
2201 return val;
2202 }
2203
2204 static void pnv_xive2_ic_tm_indirect_write(void *opaque, hwaddr offset,
2205 uint64_t val, unsigned size)
2206 {
2207 PnvXive2 *xive = PNV_XIVE2(opaque);
2208 XivePresenter *xptr = XIVE_PRESENTER(xive);
2209 hwaddr hw_page_offset;
2210 uint32_t pir;
2211 XiveTCTX *tctx;
2212
2213 pir = pnv_xive2_ic_tm_get_pir(xive, offset);
2214 hw_page_offset = pnv_xive2_ic_tm_get_hw_page_offset(xive, offset);
2215 tctx = pnv_xive2_get_indirect_tctx(xive, pir);
2216 if (tctx) {
2217 xive_tctx_tm_write(xptr, tctx, hw_page_offset, val, size);
2218 }
2219 }
2220
2221 static const MemoryRegionOps pnv_xive2_ic_tm_indirect_ops = {
2222 .read = pnv_xive2_ic_tm_indirect_read,
2223 .write = pnv_xive2_ic_tm_indirect_write,
2224 .endianness = DEVICE_BIG_ENDIAN,
2225 .valid = {
2226 .min_access_size = 1,
2227 .max_access_size = 8,
2228 },
2229 .impl = {
2230 .min_access_size = 1,
2231 .max_access_size = 8,
2232 },
2233 };
2234
2235 /*
2236 * TIMA ops
2237 */
2238 static void pnv_xive2_tm_write(void *opaque, hwaddr offset,
2239 uint64_t value, unsigned size)
2240 {
2241 PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
2242 PnvXive2 *xive = pnv_xive2_tm_get_xive(cpu);
2243 XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
2244 XivePresenter *xptr = XIVE_PRESENTER(xive);
2245
2246 xive_tctx_tm_write(xptr, tctx, offset, value, size);
2247 }
2248
2249 static uint64_t pnv_xive2_tm_read(void *opaque, hwaddr offset, unsigned size)
2250 {
2251 PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
2252 PnvXive2 *xive = pnv_xive2_tm_get_xive(cpu);
2253 XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
2254 XivePresenter *xptr = XIVE_PRESENTER(xive);
2255
2256 return xive_tctx_tm_read(xptr, tctx, offset, size);
2257 }
2258
2259 static const MemoryRegionOps pnv_xive2_tm_ops = {
2260 .read = pnv_xive2_tm_read,
2261 .write = pnv_xive2_tm_write,
2262 .endianness = DEVICE_BIG_ENDIAN,
2263 .valid = {
2264 .min_access_size = 1,
2265 .max_access_size = 8,
2266 },
2267 .impl = {
2268 .min_access_size = 1,
2269 .max_access_size = 8,
2270 },
2271 };
2272
2273 static uint64_t pnv_xive2_nvc_read(void *opaque, hwaddr addr,
2274 unsigned size)
2275 {
2276 PnvXive2 *xive = PNV_XIVE2(opaque);
2277 XivePresenter *xptr = XIVE_PRESENTER(xive);
2278 uint32_t page = addr >> xive->nvpg_shift;
2279 uint16_t op = addr & 0xFFF;
2280 uint8_t blk = pnv_xive2_block_id(xive);
2281
2282 if (size != 2) {
2283 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid nvc load size %d\n",
2284 size);
2285 return -1;
2286 }
2287
2288 return xive2_presenter_nvgc_backlog_op(xptr, true, blk, page, op, 1);
2289 }
2290
2291 static void pnv_xive2_nvc_write(void *opaque, hwaddr addr,
2292 uint64_t val, unsigned size)
2293 {
2294 PnvXive2 *xive = PNV_XIVE2(opaque);
2295 XivePresenter *xptr = XIVE_PRESENTER(xive);
2296 uint32_t page = addr >> xive->nvc_shift;
2297 uint16_t op = addr & 0xFFF;
2298 uint8_t blk = pnv_xive2_block_id(xive);
2299
2300 if (size != 1) {
2301 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid nvc write size %d\n",
2302 size);
2303 return;
2304 }
2305
2306 (void)xive2_presenter_nvgc_backlog_op(xptr, true, blk, page, op, val);
2307 }
2308
2309 static const MemoryRegionOps pnv_xive2_nvc_ops = {
2310 .read = pnv_xive2_nvc_read,
2311 .write = pnv_xive2_nvc_write,
2312 .endianness = DEVICE_BIG_ENDIAN,
2313 .valid = {
2314 .min_access_size = 1,
2315 .max_access_size = 8,
2316 },
2317 .impl = {
2318 .min_access_size = 1,
2319 .max_access_size = 8,
2320 },
2321 };
2322
2323 static uint64_t pnv_xive2_nvpg_read(void *opaque, hwaddr addr,
2324 unsigned size)
2325 {
2326 PnvXive2 *xive = PNV_XIVE2(opaque);
2327 XivePresenter *xptr = XIVE_PRESENTER(xive);
2328 uint32_t page = addr >> xive->nvpg_shift;
2329 uint16_t op = addr & 0xFFF;
2330 uint32_t index = page >> 1;
2331 uint8_t blk = pnv_xive2_block_id(xive);
2332
2333 if (size != 2) {
2334 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid nvpg load size %d\n",
2335 size);
2336 return -1;
2337 }
2338
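    /*
     * Pages alternate between NVP and NVG for the same index:
     * page 2n targets NVP entry n, page 2n + 1 targets NVG entry n.
     */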
2339 if (page % 2) {
2340 /* odd page - NVG */
2341 return xive2_presenter_nvgc_backlog_op(xptr, false, blk, index, op, 1);
2342 } else {
2343 /* even page - NVP */
2344 return xive2_presenter_nvp_backlog_op(xptr, blk, index, op);
2345 }
2346 }
2347
2348 static void pnv_xive2_nvpg_write(void *opaque, hwaddr addr,
2349 uint64_t val, unsigned size)
2350 {
2351 PnvXive2 *xive = PNV_XIVE2(opaque);
2352 XivePresenter *xptr = XIVE_PRESENTER(xive);
2353 uint32_t page = addr >> xive->nvpg_shift;
2354 uint16_t op = addr & 0xFFF;
2355 uint32_t index = page >> 1;
2356 uint8_t blk = pnv_xive2_block_id(xive);
2357
2358 if (size != 1) {
2359 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid nvpg write size %d\n",
2360 size);
2361 return;
2362 }
2363
2364 if (page % 2) {
2365 /* odd page - NVG */
2366 (void)xive2_presenter_nvgc_backlog_op(xptr, false, blk, index, op, val);
2367 } else {
2368 /* even page - NVP */
2369 (void)xive2_presenter_nvp_backlog_op(xptr, blk, index, op);
2370 }
2371 }
2372
2373 static const MemoryRegionOps pnv_xive2_nvpg_ops = {
2374 .read = pnv_xive2_nvpg_read,
2375 .write = pnv_xive2_nvpg_write,
2376 .endianness = DEVICE_BIG_ENDIAN,
2377 .valid = {
2378 .min_access_size = 1,
2379 .max_access_size = 8,
2380 },
2381 .impl = {
2382 .min_access_size = 1,
2383 .max_access_size = 8,
2384 },
2385 };
2386
2387 /*
2388 * POWER10 default capabilities: 0x2000120076f000FC
2389 */
2390 #define PNV_XIVE2_CAPABILITIES 0x2000120076f000FC
2391
2392 /*
2393 * POWER10 default configuration: 0x0030000033000000
2394 *
2395  * The 8-bit thread id was dropped for P10
2396 */
2397 #define PNV_XIVE2_CONFIGURATION 0x0030000033000000
2398
2399 static void pnv_xive2_reset(void *dev)
2400 {
2401 PnvXive2 *xive = PNV_XIVE2(dev);
2402 XiveSource *xsrc = &xive->ipi_source;
2403 Xive2EndSource *end_xsrc = &xive->end_source;
2404
2405 xive->cq_regs[CQ_XIVE_CAP >> 3] = xive->capabilities;
2406 xive->cq_regs[CQ_XIVE_CFG >> 3] = xive->config;
2407
2408 /* HW hardwires the #Topology of the chip in the block field */
2409 xive->cq_regs[CQ_XIVE_CFG >> 3] |=
2410 SETFIELD(CQ_XIVE_CFG_HYP_HARD_BLOCK_ID, 0ull, xive->chip->chip_id);
2411
2412 /* VC and PC cache watch assign mechanism */
2413 xive->vc_regs[VC_ENDC_CFG >> 3] =
2414 SETFIELD(VC_ENDC_CFG_CACHE_WATCH_ASSIGN, 0ull, 0b0111);
2415 xive->pc_regs[PC_NXC_PROC_CONFIG >> 3] =
2416 SETFIELD(PC_NXC_PROC_CONFIG_WATCH_ASSIGN, 0ull, 0b0111);
2417
2418 /* Set default page size to 64k */
2419 xive->ic_shift = xive->esb_shift = xive->end_shift = 16;
2420 xive->nvc_shift = xive->nvpg_shift = xive->tm_shift = 16;
2421
2422 /* Clear source MMIOs */
2423 if (memory_region_is_mapped(&xsrc->esb_mmio)) {
2424 memory_region_del_subregion(&xive->esb_mmio, &xsrc->esb_mmio);
2425 }
2426
2427 if (memory_region_is_mapped(&end_xsrc->esb_mmio)) {
2428 memory_region_del_subregion(&xive->end_mmio, &end_xsrc->esb_mmio);
2429 }
2430 }
2431
2432 /*
2433 * Maximum number of IRQs and ENDs supported by HW. Will be tuned by
2434 * software.
2435 */
2436 #define PNV_XIVE2_NR_IRQS (PNV10_XIVE2_ESB_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
2437 #define PNV_XIVE2_NR_ENDS (PNV10_XIVE2_END_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
2438
2439 static void pnv_xive2_realize(DeviceState *dev, Error **errp)
2440 {
2441 PnvXive2 *xive = PNV_XIVE2(dev);
2442 PnvXive2Class *pxc = PNV_XIVE2_GET_CLASS(dev);
2443 XiveSource *xsrc = &xive->ipi_source;
2444 Xive2EndSource *end_xsrc = &xive->end_source;
2445 Error *local_err = NULL;
2446 int i;
2447
2448 pxc->parent_realize(dev, &local_err);
2449 if (local_err) {
2450 error_propagate(errp, local_err);
2451 return;
2452 }
2453
2454 assert(xive->chip);
2455
2456 /*
2457 * The XiveSource and Xive2EndSource objects are realized with the
2458 * maximum allowed HW configuration. The ESB MMIO regions will be
2459 * resized dynamically when the controller is configured by the FW
2460 * to limit accesses to resources not provisioned.
2461 */
2462 object_property_set_int(OBJECT(xsrc), "flags", XIVE_SRC_STORE_EOI,
2463 &error_fatal);
2464 object_property_set_int(OBJECT(xsrc), "nr-irqs", PNV_XIVE2_NR_IRQS,
2465 &error_fatal);
2466 object_property_set_link(OBJECT(xsrc), "xive", OBJECT(xive),
2467 &error_fatal);
2468 qdev_realize(DEVICE(xsrc), NULL, &local_err);
2469 if (local_err) {
2470 error_propagate(errp, local_err);
2471 return;
2472 }
2473
2474 object_property_set_int(OBJECT(end_xsrc), "nr-ends", PNV_XIVE2_NR_ENDS,
2475 &error_fatal);
2476 object_property_set_link(OBJECT(end_xsrc), "xive", OBJECT(xive),
2477 &error_abort);
2478 qdev_realize(DEVICE(end_xsrc), NULL, &local_err);
2479 if (local_err) {
2480 error_propagate(errp, local_err);
2481 return;
2482 }
2483
2484 /* XSCOM region, used for initial configuration of the BARs */
2485 memory_region_init_io(&xive->xscom_regs, OBJECT(dev),
2486 &pnv_xive2_xscom_ops, xive, "xscom-xive",
2487 PNV10_XSCOM_XIVE2_SIZE << 3);
2488
2489 /* Interrupt controller MMIO regions */
2490 xive->ic_shift = 16;
2491 memory_region_init(&xive->ic_mmio, OBJECT(dev), "xive-ic",
2492 PNV10_XIVE2_IC_SIZE);
2493
2494 for (i = 0; i < ARRAY_SIZE(xive->ic_mmios); i++) {
2495 memory_region_init_io(&xive->ic_mmios[i], OBJECT(dev),
2496 pnv_xive2_ic_regions[i].ops, xive,
2497 pnv_xive2_ic_regions[i].name,
2498 pnv_xive2_ic_regions[i].pgsize << xive->ic_shift);
2499 }
2500
2501 /*
2502 * VC MMIO regions.
2503 */
2504 xive->esb_shift = 16;
2505 xive->end_shift = 16;
2506 memory_region_init(&xive->esb_mmio, OBJECT(xive), "xive-esb",
2507 PNV10_XIVE2_ESB_SIZE);
2508 memory_region_init(&xive->end_mmio, OBJECT(xive), "xive-end",
2509 PNV10_XIVE2_END_SIZE);
2510
2511 /* Presenter Controller MMIO region (not modeled) */
2512 xive->nvc_shift = 16;
2513 xive->nvpg_shift = 16;
2514 memory_region_init_io(&xive->nvc_mmio, OBJECT(dev),
2515 &pnv_xive2_nvc_ops, xive,
2516 "xive-nvc", PNV10_XIVE2_NVC_SIZE);
2517
2518 memory_region_init_io(&xive->nvpg_mmio, OBJECT(dev),
2519 &pnv_xive2_nvpg_ops, xive,
2520 "xive-nvpg", PNV10_XIVE2_NVPG_SIZE);
2521
2522 /* Thread Interrupt Management Area (Direct) */
2523 xive->tm_shift = 16;
2524 memory_region_init_io(&xive->tm_mmio, OBJECT(dev), &pnv_xive2_tm_ops,
2525 xive, "xive-tima", PNV10_XIVE2_TM_SIZE);
2526
2527 qemu_register_reset(pnv_xive2_reset, dev);
2528 }
2529
2530 static const Property pnv_xive2_properties[] = {
2531 DEFINE_PROP_UINT64("ic-bar", PnvXive2, ic_base, 0),
2532 DEFINE_PROP_UINT64("esb-bar", PnvXive2, esb_base, 0),
2533 DEFINE_PROP_UINT64("end-bar", PnvXive2, end_base, 0),
2534 DEFINE_PROP_UINT64("nvc-bar", PnvXive2, nvc_base, 0),
2535 DEFINE_PROP_UINT64("nvpg-bar", PnvXive2, nvpg_base, 0),
2536 DEFINE_PROP_UINT64("tm-bar", PnvXive2, tm_base, 0),
2537 DEFINE_PROP_UINT64("capabilities", PnvXive2, capabilities,
2538 PNV_XIVE2_CAPABILITIES),
2539 DEFINE_PROP_UINT64("config", PnvXive2, config,
2540 PNV_XIVE2_CONFIGURATION),
2541 DEFINE_PROP_LINK("chip", PnvXive2, chip, TYPE_PNV_CHIP, PnvChip *),
2542 };
2543
2544 static void pnv_xive2_instance_init(Object *obj)
2545 {
2546 PnvXive2 *xive = PNV_XIVE2(obj);
2547
2548 object_initialize_child(obj, "ipi_source", &xive->ipi_source,
2549 TYPE_XIVE_SOURCE);
2550 object_initialize_child(obj, "end_source", &xive->end_source,
2551 TYPE_XIVE2_END_SOURCE);
2552 }
2553
2554 static int pnv_xive2_dt_xscom(PnvXScomInterface *dev, void *fdt,
2555 int xscom_offset)
2556 {
2557 const char compat_p10[] = "ibm,power10-xive-x";
2558 char *name;
2559 int offset;
2560 uint32_t reg[] = {
2561 cpu_to_be32(PNV10_XSCOM_XIVE2_BASE),
2562 cpu_to_be32(PNV10_XSCOM_XIVE2_SIZE)
2563 };
2564
2565 name = g_strdup_printf("xive@%x", PNV10_XSCOM_XIVE2_BASE);
2566 offset = fdt_add_subnode(fdt, xscom_offset, name);
2567 _FDT(offset);
2568 g_free(name);
2569
2570 _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg))));
2571 _FDT(fdt_setprop(fdt, offset, "compatible", compat_p10,
2572 sizeof(compat_p10)));
2573 return 0;
2574 }
2575
2576 static void pnv_xive2_class_init(ObjectClass *klass, const void *data)
2577 {
2578 DeviceClass *dc = DEVICE_CLASS(klass);
2579 PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass);
2580 Xive2RouterClass *xrc = XIVE2_ROUTER_CLASS(klass);
2581 XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);
2582 XivePresenterClass *xpc = XIVE_PRESENTER_CLASS(klass);
2583 PnvXive2Class *pxc = PNV_XIVE2_CLASS(klass);
2584
2585 xdc->dt_xscom = pnv_xive2_dt_xscom;
2586
2587 dc->desc = "PowerNV XIVE2 Interrupt Controller (POWER10)";
2588 device_class_set_parent_realize(dc, pnv_xive2_realize,
2589 &pxc->parent_realize);
2590 device_class_set_props(dc, pnv_xive2_properties);
2591
2592 xrc->get_eas = pnv_xive2_get_eas;
2593 xrc->get_pq = pnv_xive2_get_pq;
2594 xrc->set_pq = pnv_xive2_set_pq;
2595 xrc->get_end = pnv_xive2_get_end;
2596 xrc->write_end = pnv_xive2_write_end;
2597 xrc->get_nvp = pnv_xive2_get_nvp;
2598 xrc->write_nvp = pnv_xive2_write_nvp;
2599 xrc->get_nvgc = pnv_xive2_get_nvgc;
2600 xrc->write_nvgc = pnv_xive2_write_nvgc;
2601 xrc->get_config = pnv_xive2_get_config;
2602 xrc->get_block_id = pnv_xive2_get_block_id;
2603
2604 xnc->notify = pnv_xive2_notify;
2605
2606 xpc->match_nvt = pnv_xive2_match_nvt;
2607 xpc->get_config = pnv_xive2_presenter_get_config;
2608 xpc->broadcast = pnv_xive2_broadcast;
2609 };
2610
2611 static const TypeInfo pnv_xive2_info = {
2612 .name = TYPE_PNV_XIVE2,
2613 .parent = TYPE_XIVE2_ROUTER,
2614 .instance_init = pnv_xive2_instance_init,
2615 .instance_size = sizeof(PnvXive2),
2616 .class_init = pnv_xive2_class_init,
2617 .class_size = sizeof(PnvXive2Class),
2618 .interfaces = (const InterfaceInfo[]) {
2619 { TYPE_PNV_XSCOM_INTERFACE },
2620 { }
2621 }
2622 };
2623
2624 static void pnv_xive2_register_types(void)
2625 {
2626 type_register_static(&pnv_xive2_info);
2627 }
2628
2629 type_init(pnv_xive2_register_types)
2630
2631 /*
2632 * If the table is direct, we can compute the number of PQ entries
2633 * provisioned by FW.
2634 */
2635 static uint32_t pnv_xive2_nr_esbs(PnvXive2 *xive)
2636 {
2637 uint8_t blk = pnv_xive2_block_id(xive);
2638 uint64_t vsd = xive->vsds[VST_ESB][blk];
2639 uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);
2640
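    /*
     * Each ESB entry is a 2-bit PQ state, so a byte of backing store
     * holds SBE_PER_BYTE (4) entries.
     */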
2641 return VSD_INDIRECT & vsd ? 0 : vst_tsize * SBE_PER_BYTE;
2642 }
2643
2644 /*
2645 * Compute the number of entries per indirect subpage.
2646 */
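/*
 * For instance, a 64k indirect subpage holding 32-byte entries provides
 * 2048 entries per subpage.
 */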
2647 static uint64_t pnv_xive2_vst_per_subpage(PnvXive2 *xive, uint32_t type)
2648 {
2649 uint8_t blk = pnv_xive2_block_id(xive);
2650 uint64_t vsd = xive->vsds[type][blk];
2651 const XiveVstInfo *info = &vst_infos[type];
2652 uint64_t vsd_addr;
2653 uint32_t page_shift;
2654
2655 /* For direct tables, fake a valid value */
2656 if (!(VSD_INDIRECT & vsd)) {
2657 return 1;
2658 }
2659
2660 /* Get the page size of the indirect table. */
2661 vsd_addr = vsd & VSD_ADDRESS_MASK;
2662 ldq_be_dma(&address_space_memory, vsd_addr, &vsd, MEMTXATTRS_UNSPECIFIED);
2663
2664 if (!(vsd & VSD_ADDRESS_MASK)) {
2665 #ifdef XIVE2_DEBUG
2666 xive2_error(xive, "VST: invalid %s entry!?", info->name);
2667 #endif
2668 return 0;
2669 }
2670
2671 page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
2672
2673 if (!pnv_xive2_vst_page_size_allowed(page_shift)) {
2674 xive2_error(xive, "VST: invalid %s page shift %d", info->name,
2675 page_shift);
2676 return 0;
2677 }
2678
2679 return (1ull << page_shift) / info->size;
2680 }
2681
2682 void pnv_xive2_pic_print_info(PnvXive2 *xive, GString *buf)
2683 {
2684 Xive2Router *xrtr = XIVE2_ROUTER(xive);
2685 uint8_t blk = pnv_xive2_block_id(xive);
2686 uint8_t chip_id = xive->chip->chip_id;
2687 uint32_t srcno0 = XIVE_EAS(blk, 0);
2688 uint32_t nr_esbs = pnv_xive2_nr_esbs(xive);
2689 Xive2Eas eas;
2690 Xive2End end;
2691 Xive2Nvp nvp;
2692 Xive2Nvgc nvgc;
2693 int i;
2694 uint64_t entries_per_subpage;
2695
2696 g_string_append_printf(buf, "XIVE[%x] Source %08x .. %08x\n",
2697 blk, srcno0, srcno0 + nr_esbs - 1);
2698 xive_source_pic_print_info(&xive->ipi_source, srcno0, buf);
2699
2700 g_string_append_printf(buf, "XIVE[%x] EAT %08x .. %08x\n",
2701 blk, srcno0, srcno0 + nr_esbs - 1);
2702 for (i = 0; i < nr_esbs; i++) {
2703 if (xive2_router_get_eas(xrtr, blk, i, &eas)) {
2704 break;
2705 }
2706 if (!xive2_eas_is_masked(&eas)) {
2707 xive2_eas_pic_print_info(&eas, i, buf);
2708 }
2709 }
2710
2711 g_string_append_printf(buf, "XIVE[%x] #%d END Escalation EAT\n",
2712 chip_id, blk);
2713 i = 0;
2714 while (!xive2_router_get_end(xrtr, blk, i, &end)) {
2715 xive2_end_eas_pic_print_info(&end, i++, buf);
2716 }
2717
2718 g_string_append_printf(buf, "XIVE[%x] #%d ENDT\n", chip_id, blk);
2719 i = 0;
2720 while (!xive2_router_get_end(xrtr, blk, i, &end)) {
2721 xive2_end_pic_print_info(&end, i++, buf);
2722 }
2723
2724 g_string_append_printf(buf, "XIVE[%x] #%d NVPT %08x .. %08x\n",
2725 chip_id, blk, 0, XIVE2_NVP_COUNT - 1);
2726 entries_per_subpage = pnv_xive2_vst_per_subpage(xive, VST_NVP);
2727 for (i = 0; i < XIVE2_NVP_COUNT; i += entries_per_subpage) {
2728 while (!xive2_router_get_nvp(xrtr, blk, i, &nvp)) {
2729 xive2_nvp_pic_print_info(&nvp, i++, buf);
2730 }
2731 }
2732
2733 g_string_append_printf(buf, "XIVE[%x] #%d NVGT %08x .. %08x\n",
2734 chip_id, blk, 0, XIVE2_NVP_COUNT - 1);
2735 entries_per_subpage = pnv_xive2_vst_per_subpage(xive, VST_NVG);
2736 for (i = 0; i < XIVE2_NVP_COUNT; i += entries_per_subpage) {
2737 while (!xive2_router_get_nvgc(xrtr, false, blk, i, &nvgc)) {
2738 xive2_nvgc_pic_print_info(&nvgc, i++, buf);
2739 }
2740 }
2741
2742 g_string_append_printf(buf, "XIVE[%x] #%d NVCT %08x .. %08x\n",
2743 chip_id, blk, 0, XIVE2_NVP_COUNT - 1);
2744 entries_per_subpage = pnv_xive2_vst_per_subpage(xive, VST_NVC);
2745 for (i = 0; i < XIVE2_NVP_COUNT; i += entries_per_subpage) {
2746 while (!xive2_router_get_nvgc(xrtr, true, blk, i, &nvgc)) {
2747 xive2_nvgc_pic_print_info(&nvgc, i++, buf);
2748 }
2749 }
2750 }
2751