1 /*
2 * QEMU PowerPC XIVE2 interrupt controller model (POWER10)
3 *
4 * Copyright (c) 2019-2022, IBM Corporation.
5 *
6 * This code is licensed under the GPL version 2 or later. See the
7 * COPYING file in the top-level directory.
8 */
9
10 #include "qemu/osdep.h"
11 #include "qemu/log.h"
12 #include "qapi/error.h"
13 #include "target/ppc/cpu.h"
14 #include "sysemu/cpus.h"
15 #include "sysemu/dma.h"
16 #include "hw/ppc/fdt.h"
17 #include "hw/ppc/pnv.h"
18 #include "hw/ppc/pnv_chip.h"
19 #include "hw/ppc/pnv_core.h"
20 #include "hw/ppc/pnv_xscom.h"
21 #include "hw/ppc/xive2.h"
22 #include "hw/ppc/pnv_xive.h"
23 #include "hw/ppc/xive_regs.h"
24 #include "hw/ppc/xive2_regs.h"
25 #include "hw/ppc/ppc.h"
26 #include "hw/qdev-properties.h"
27 #include "sysemu/reset.h"
28 #include "sysemu/qtest.h"
29
30 #include <libfdt.h>
31
32 #include "pnv_xive2_regs.h"
33
34 #undef XIVE2_DEBUG
35
36 /* XIVE Sync or Flush Notification Block */
37 typedef struct XiveSfnBlock {
38 uint8_t bytes[32];
39 } XiveSfnBlock;
40
41 /* XIVE Thread Sync or Flush Notification Area */
42 typedef struct XiveThreadNA {
43 XiveSfnBlock topo[16];
44 } XiveThreadNA;
45
46 /*
47 * Virtual structures table (VST)
48 */
49 #define SBE_PER_BYTE 4
50
51 typedef struct XiveVstInfo {
52 const char *name;
53 uint32_t size;
54 uint32_t max_blocks;
55 } XiveVstInfo;
56
57 static const XiveVstInfo vst_infos[] = {
58
59 [VST_EAS] = { "EAT", sizeof(Xive2Eas), 16 },
60 [VST_ESB] = { "ESB", 1, 16 },
61 [VST_END] = { "ENDT", sizeof(Xive2End), 16 },
62
63 [VST_NVP] = { "NVPT", sizeof(Xive2Nvp), 16 },
64 [VST_NVG] = { "NVGT", sizeof(Xive2Nvgc), 16 },
65 [VST_NVC] = { "NVCT", sizeof(Xive2Nvgc), 16 },
66
67 [VST_IC] = { "IC", 1, /* ? */ 16 }, /* Topology # */
68 [VST_SYNC] = { "SYNC", sizeof(XiveThreadNA), 16 }, /* Topology # */
69
70 /*
71 * This table contains the backing store pages for the interrupt
72 * fifos of the VC sub-engine in case of overflow.
73 *
74 * 0 - IPI,
75 * 1 - HWD,
76 * 2 - NxC,
77 * 3 - INT,
78 * 4 - OS-Queue,
79 * 5 - Pool-Queue,
80 * 6 - Hard-Queue
81 */
82 [VST_ERQ] = { "ERQ", 1, VC_QUEUE_COUNT },
83 };
84
85 #define xive2_error(xive, fmt, ...) \
86 qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n", \
87 (xive)->chip->chip_id, ## __VA_ARGS__);
88
89 /*
90 * TODO: Document block id override
91 */
92 static uint32_t pnv_xive2_block_id(PnvXive2 *xive)
93 {
94 uint8_t blk = xive->chip->chip_id;
95 uint64_t cfg_val = xive->cq_regs[CQ_XIVE_CFG >> 3];
96
97 if (cfg_val & CQ_XIVE_CFG_HYP_HARD_BLKID_OVERRIDE) {
98 blk = GETFIELD(CQ_XIVE_CFG_HYP_HARD_BLOCK_ID, cfg_val);
99 }
100
101 return blk;
102 }
103
104 /*
105 * Remote access to controllers. HW uses MMIOs. For now, a simple scan
106 * of the chips is good enough.
107 *
108 * TODO: Block scope support
109 */
110 static PnvXive2 *pnv_xive2_get_remote(uint8_t blk)
111 {
112 PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine());
113 int i;
114
115 for (i = 0; i < pnv->num_chips; i++) {
116 Pnv10Chip *chip10 = PNV10_CHIP(pnv->chips[i]);
117 PnvXive2 *xive = &chip10->xive;
118
119 if (pnv_xive2_block_id(xive) == blk) {
120 return xive;
121 }
122 }
123 return NULL;
124 }
125
126 /*
127 * VST accessors for ESB, EAT, ENDT, NVP
128 *
129  * Indirect VST tables are arrays of VSDs pointing to pages (all of the
130  * same size). Each page is a direct VST table.
131 */
132
133 #define XIVE_VSD_SIZE 8
134
135 /* Indirect page size can be 4K, 64K, 2M, 16M. */
136 static uint64_t pnv_xive2_vst_page_size_allowed(uint32_t page_shift)
137 {
138 return page_shift == 12 || page_shift == 16 ||
139 page_shift == 21 || page_shift == 24;
140 }
141
142 static uint64_t pnv_xive2_vst_addr_direct(PnvXive2 *xive, uint32_t type,
143 uint64_t vsd, uint32_t idx)
144 {
145 const XiveVstInfo *info = &vst_infos[type];
146 uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
147 uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);
148 uint32_t idx_max;
149
150 idx_max = vst_tsize / info->size - 1;
151 if (idx > idx_max) {
152 #ifdef XIVE2_DEBUG
153 xive2_error(xive, "VST: %s entry %x out of range [ 0 .. %x ] !?",
154 info->name, idx, idx_max);
155 #endif
156 return 0;
157 }
158
159 return vst_addr + idx * info->size;
160 }
161
162 static uint64_t pnv_xive2_vst_addr_indirect(PnvXive2 *xive, uint32_t type,
163 uint64_t vsd, uint32_t idx)
164 {
165 const XiveVstInfo *info = &vst_infos[type];
166 uint64_t vsd_addr;
167 uint32_t vsd_idx;
168 uint32_t page_shift;
169 uint32_t vst_per_page;
170
171 /* Get the page size of the indirect table. */
172 vsd_addr = vsd & VSD_ADDRESS_MASK;
173 ldq_be_dma(&address_space_memory, vsd_addr, &vsd, MEMTXATTRS_UNSPECIFIED);
174
175 if (!(vsd & VSD_ADDRESS_MASK)) {
176 #ifdef XIVE2_DEBUG
177 xive2_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
178 #endif
179 return 0;
180 }
181
182 page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
183
184 if (!pnv_xive2_vst_page_size_allowed(page_shift)) {
185 xive2_error(xive, "VST: invalid %s page shift %d", info->name,
186 page_shift);
187 return 0;
188 }
189
190 vst_per_page = (1ull << page_shift) / info->size;
191 vsd_idx = idx / vst_per_page;
192
193 /* Load the VSD we are looking for, if not already done */
194 if (vsd_idx) {
195 vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE;
196 ldq_be_dma(&address_space_memory, vsd_addr, &vsd,
197 MEMTXATTRS_UNSPECIFIED);
198
199 if (!(vsd & VSD_ADDRESS_MASK)) {
200 #ifdef XIVE2_DEBUG
201 xive2_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
202 #endif
203 return 0;
204 }
205
206 /*
207 * Check that the pages have a consistent size across the
208 * indirect table
209 */
210 if (page_shift != GETFIELD(VSD_TSIZE, vsd) + 12) {
211             xive2_error(xive, "VST: %s entry %x indirect page size differs !?",
212 info->name, idx);
213 return 0;
214 }
215 }
216
217 return pnv_xive2_vst_addr_direct(xive, type, vsd, (idx % vst_per_page));
218 }
219
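/*
 * The NVC and NVG tables can be configured with a "compress" (shift)
 * factor: the table index is right-shifted by that many bits before
 * the lookup. A shift value above 8 is treated as no compression.
 */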
220 static uint8_t pnv_xive2_nvc_table_compress_shift(PnvXive2 *xive)
221 {
222 uint8_t shift = GETFIELD(PC_NXC_PROC_CONFIG_NVC_TABLE_COMPRESS,
223 xive->pc_regs[PC_NXC_PROC_CONFIG >> 3]);
224 return shift > 8 ? 0 : shift;
225 }
226
227 static uint8_t pnv_xive2_nvg_table_compress_shift(PnvXive2 *xive)
228 {
229 uint8_t shift = GETFIELD(PC_NXC_PROC_CONFIG_NVG_TABLE_COMPRESS,
230 xive->pc_regs[PC_NXC_PROC_CONFIG >> 3]);
231 return shift > 8 ? 0 : shift;
232 }
233
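/*
 * Resolve the guest address of entry @idx in the VST @type of block
 * @blk. Forward VSDs are followed to the remote chip owning the block
 * and the NVG/NVC index compression is applied before the lookup.
 */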
234 static uint64_t pnv_xive2_vst_addr(PnvXive2 *xive, uint32_t type, uint8_t blk,
235 uint32_t idx)
236 {
237 const XiveVstInfo *info = &vst_infos[type];
238 uint64_t vsd;
239
240 if (blk >= info->max_blocks) {
241 xive2_error(xive, "VST: invalid block id %d for VST %s %d !?",
242 blk, info->name, idx);
243 return 0;
244 }
245
246 vsd = xive->vsds[type][blk];
247 if (vsd == 0) {
248 xive2_error(xive, "VST: vsd == 0 block id %d for VST %s %d !?",
249 blk, info->name, idx);
250 return 0;
251 }
252
253 /* Remote VST access */
254 if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) {
255 xive = pnv_xive2_get_remote(blk);
256
257 return xive ? pnv_xive2_vst_addr(xive, type, blk, idx) : 0;
258 }
259
260 if (type == VST_NVG) {
261 idx >>= pnv_xive2_nvg_table_compress_shift(xive);
262 } else if (type == VST_NVC) {
263 idx >>= pnv_xive2_nvc_table_compress_shift(xive);
264 }
265
266 if (VSD_INDIRECT & vsd) {
267 return pnv_xive2_vst_addr_indirect(xive, type, vsd, idx);
268 }
269
270 return pnv_xive2_vst_addr_direct(xive, type, vsd, idx);
271 }
272
273 static int pnv_xive2_vst_read(PnvXive2 *xive, uint32_t type, uint8_t blk,
274 uint32_t idx, void *data)
275 {
276 const XiveVstInfo *info = &vst_infos[type];
277 uint64_t addr = pnv_xive2_vst_addr(xive, type, blk, idx);
278 MemTxResult result;
279
280 if (!addr) {
281 return -1;
282 }
283
284 result = address_space_read(&address_space_memory, addr,
285 MEMTXATTRS_UNSPECIFIED, data,
286 info->size);
287 if (result != MEMTX_OK) {
288 xive2_error(xive, "VST: read failed at @0x%" HWADDR_PRIx
289 " for VST %s %x/%x\n", addr, info->name, blk, idx);
290 return -1;
291 }
292 return 0;
293 }
294
295 #define XIVE_VST_WORD_ALL -1
296
297 static int pnv_xive2_vst_write(PnvXive2 *xive, uint32_t type, uint8_t blk,
298 uint32_t idx, void *data, uint32_t word_number)
299 {
300 const XiveVstInfo *info = &vst_infos[type];
301 uint64_t addr = pnv_xive2_vst_addr(xive, type, blk, idx);
302 MemTxResult result;
303
304 if (!addr) {
305 return -1;
306 }
307
308 if (word_number == XIVE_VST_WORD_ALL) {
309 result = address_space_write(&address_space_memory, addr,
310 MEMTXATTRS_UNSPECIFIED, data,
311 info->size);
312 } else {
313 result = address_space_write(&address_space_memory,
314 addr + word_number * 4,
315 MEMTXATTRS_UNSPECIFIED,
316 data + word_number * 4, 4);
317 }
318
319 if (result != MEMTX_OK) {
320 xive2_error(xive, "VST: write failed at @0x%" HWADDR_PRIx
321                     " for VST %s %x/%x\n", addr, info->name, blk, idx);
322 return -1;
323 }
324 return 0;
325 }
326
327 static int pnv_xive2_get_pq(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
328 uint8_t *pq)
329 {
330 PnvXive2 *xive = PNV_XIVE2(xrtr);
331
332 if (pnv_xive2_block_id(xive) != blk) {
333 xive2_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
334 return -1;
335 }
336
337 *pq = xive_source_esb_get(&xive->ipi_source, idx);
338 return 0;
339 }
340
341 static int pnv_xive2_set_pq(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
342 uint8_t *pq)
343 {
344 PnvXive2 *xive = PNV_XIVE2(xrtr);
345
346 if (pnv_xive2_block_id(xive) != blk) {
347 xive2_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
348 return -1;
349 }
350
351 *pq = xive_source_esb_set(&xive->ipi_source, idx, *pq);
352 return 0;
353 }
354
355 static int pnv_xive2_get_end(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
356 Xive2End *end)
357 {
358 return pnv_xive2_vst_read(PNV_XIVE2(xrtr), VST_END, blk, idx, end);
359 }
360
361 static int pnv_xive2_write_end(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
362 Xive2End *end, uint8_t word_number)
363 {
364 return pnv_xive2_vst_write(PNV_XIVE2(xrtr), VST_END, blk, idx, end,
365 word_number);
366 }
367
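/* Return the PIR of the accessing thread, or 0 under qtest (no vCPU context) */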
368 static inline int pnv_xive2_get_current_pir(PnvXive2 *xive)
369 {
370 if (!qtest_enabled()) {
371 PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
372 return ppc_cpu_pir(cpu);
373 }
374 return 0;
375 }
376
377 /*
378 * After SW injects a Queue Sync or Cache Flush operation, HW will notify
379 * SW of the completion of the operation by writing a byte of all 1's (0xff)
380 * to a specific memory location. The memory location is calculated by first
381 * looking up a base address in the SYNC VSD using the Topology ID of the
382 * originating thread as the "block" number. This points to a
383  * 64k block of memory that is further divided into 128 chunks of
384  * 512 bytes, indexed by the thread id of the requesting thread.
385  * Finally, each 512 byte chunk is divided into 16 chunks of 32 bytes,
386  * which are indexed by the topology id of the targeted IC's chip.
387 * The values below are the offsets into that 32 byte chunk of memory for
388 * each type of cache flush or queue sync operation.
389 */
390 #define PNV_XIVE2_QUEUE_IPI 0x00
391 #define PNV_XIVE2_QUEUE_HW 0x01
392 #define PNV_XIVE2_QUEUE_NXC 0x02
393 #define PNV_XIVE2_QUEUE_INT 0x03
394 #define PNV_XIVE2_QUEUE_OS 0x04
395 #define PNV_XIVE2_QUEUE_POOL 0x05
396 #define PNV_XIVE2_QUEUE_HARD 0x06
397 #define PNV_XIVE2_CACHE_ENDC 0x08
398 #define PNV_XIVE2_CACHE_ESBC 0x09
399 #define PNV_XIVE2_CACHE_EASC 0x0a
400 #define PNV_XIVE2_QUEUE_NXC_LD_LCL_NCO 0x10
401 #define PNV_XIVE2_QUEUE_NXC_LD_LCL_CO 0x11
402 #define PNV_XIVE2_QUEUE_NXC_ST_LCL_NCI 0x12
403 #define PNV_XIVE2_QUEUE_NXC_ST_LCL_CI 0x13
404 #define PNV_XIVE2_QUEUE_NXC_ST_RMT_NCI 0x14
405 #define PNV_XIVE2_QUEUE_NXC_ST_RMT_CI 0x15
406 #define PNV_XIVE2_CACHE_NXC 0x18
407
408 static int pnv_xive2_inject_notify(PnvXive2 *xive, int type)
409 {
410 uint64_t addr;
411 int pir = pnv_xive2_get_current_pir(xive);
412 int thread_nr = PNV10_PIR2THREAD(pir);
413 int thread_topo_id = PNV10_PIR2CHIP(pir);
414 int ic_topo_id = xive->chip->chip_id;
415 uint64_t offset = ic_topo_id * sizeof(XiveSfnBlock);
416 uint8_t byte = 0xff;
417 MemTxResult result;
418
419 /* Retrieve the address of requesting thread's notification area */
420 addr = pnv_xive2_vst_addr(xive, VST_SYNC, thread_topo_id, thread_nr);
421
422 if (!addr) {
423 xive2_error(xive, "VST: no SYNC entry %x/%x !?",
424 thread_topo_id, thread_nr);
425 return -1;
426 }
427
428 address_space_stb(&address_space_memory, addr + offset + type, byte,
429 MEMTXATTRS_UNSPECIFIED, &result);
430 assert(result == MEMTX_OK);
431
432 return 0;
433 }
434
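/*
 * Each of the 4 cache watch engines has a SPEC register and four DATA
 * registers, spaced 0x40 bytes apart. Writing DATA0 flushes the END
 * entry held in the DATA registers to the VST entry selected by the
 * block/index fields of the SPEC register.
 */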
435 static int pnv_xive2_end_update(PnvXive2 *xive, uint8_t watch_engine)
436 {
437 uint8_t blk;
438 uint32_t idx;
439 int i, spec_reg, data_reg;
440 uint64_t endc_watch[4];
441
442 assert(watch_engine < ARRAY_SIZE(endc_watch));
443
444 spec_reg = (VC_ENDC_WATCH0_SPEC + watch_engine * 0x40) >> 3;
445 data_reg = (VC_ENDC_WATCH0_DATA0 + watch_engine * 0x40) >> 3;
446 blk = GETFIELD(VC_ENDC_WATCH_BLOCK_ID, xive->vc_regs[spec_reg]);
447 idx = GETFIELD(VC_ENDC_WATCH_INDEX, xive->vc_regs[spec_reg]);
448
449 for (i = 0; i < ARRAY_SIZE(endc_watch); i++) {
450 endc_watch[i] = cpu_to_be64(xive->vc_regs[data_reg + i]);
451 }
452
453 return pnv_xive2_vst_write(xive, VST_END, blk, idx, endc_watch,
454 XIVE_VST_WORD_ALL);
455 }
456
457 static void pnv_xive2_end_cache_load(PnvXive2 *xive, uint8_t watch_engine)
458 {
459 uint8_t blk;
460 uint32_t idx;
461 uint64_t endc_watch[4] = { 0 };
462 int i, spec_reg, data_reg;
463
464 assert(watch_engine < ARRAY_SIZE(endc_watch));
465
466 spec_reg = (VC_ENDC_WATCH0_SPEC + watch_engine * 0x40) >> 3;
467 data_reg = (VC_ENDC_WATCH0_DATA0 + watch_engine * 0x40) >> 3;
468 blk = GETFIELD(VC_ENDC_WATCH_BLOCK_ID, xive->vc_regs[spec_reg]);
469 idx = GETFIELD(VC_ENDC_WATCH_INDEX, xive->vc_regs[spec_reg]);
470
471 if (pnv_xive2_vst_read(xive, VST_END, blk, idx, endc_watch)) {
472 xive2_error(xive, "VST: no END entry %x/%x !?", blk, idx);
473 }
474
475 for (i = 0; i < ARRAY_SIZE(endc_watch); i++) {
476 xive->vc_regs[data_reg + i] = be64_to_cpu(endc_watch[i]);
477 }
478 }
479
480 static int pnv_xive2_get_nvp(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
481 Xive2Nvp *nvp)
482 {
483 return pnv_xive2_vst_read(PNV_XIVE2(xrtr), VST_NVP, blk, idx, nvp);
484 }
485
486 static int pnv_xive2_write_nvp(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
487 Xive2Nvp *nvp, uint8_t word_number)
488 {
489 return pnv_xive2_vst_write(PNV_XIVE2(xrtr), VST_NVP, blk, idx, nvp,
490 word_number);
491 }
492
493 static int pnv_xive2_get_nvgc(Xive2Router *xrtr, bool crowd,
494 uint8_t blk, uint32_t idx,
495 Xive2Nvgc *nvgc)
496 {
497 return pnv_xive2_vst_read(PNV_XIVE2(xrtr), crowd ? VST_NVC : VST_NVG,
498 blk, idx, nvgc);
499 }
500
501 static int pnv_xive2_write_nvgc(Xive2Router *xrtr, bool crowd,
502 uint8_t blk, uint32_t idx,
503 Xive2Nvgc *nvgc)
504 {
505 return pnv_xive2_vst_write(PNV_XIVE2(xrtr), crowd ? VST_NVC : VST_NVG,
506 blk, idx, nvgc,
507 XIVE_VST_WORD_ALL);
508 }
509
510 static int pnv_xive2_nxc_to_table_type(uint8_t nxc_type, uint32_t *table_type)
511 {
512 switch (nxc_type) {
513 case PC_NXC_WATCH_NXC_NVP:
514 *table_type = VST_NVP;
515 break;
516 case PC_NXC_WATCH_NXC_NVG:
517 *table_type = VST_NVG;
518 break;
519 case PC_NXC_WATCH_NXC_NVC:
520 *table_type = VST_NVC;
521 break;
522 default:
523 qemu_log_mask(LOG_GUEST_ERROR,
524 "XIVE: invalid table type for nxc operation\n");
525 return -1;
526 }
527 return 0;
528 }
529
530 static int pnv_xive2_nxc_update(PnvXive2 *xive, uint8_t watch_engine)
531 {
532 uint8_t blk, nxc_type;
533 uint32_t idx, table_type = -1;
534 int i, spec_reg, data_reg;
535 uint64_t nxc_watch[4];
536
537 assert(watch_engine < ARRAY_SIZE(nxc_watch));
538
539 spec_reg = (PC_NXC_WATCH0_SPEC + watch_engine * 0x40) >> 3;
540 data_reg = (PC_NXC_WATCH0_DATA0 + watch_engine * 0x40) >> 3;
541 nxc_type = GETFIELD(PC_NXC_WATCH_NXC_TYPE, xive->pc_regs[spec_reg]);
542 blk = GETFIELD(PC_NXC_WATCH_BLOCK_ID, xive->pc_regs[spec_reg]);
543 idx = GETFIELD(PC_NXC_WATCH_INDEX, xive->pc_regs[spec_reg]);
544
545 assert(!pnv_xive2_nxc_to_table_type(nxc_type, &table_type));
546
547 for (i = 0; i < ARRAY_SIZE(nxc_watch); i++) {
548 nxc_watch[i] = cpu_to_be64(xive->pc_regs[data_reg + i]);
549 }
550
551 return pnv_xive2_vst_write(xive, table_type, blk, idx, nxc_watch,
552 XIVE_VST_WORD_ALL);
553 }
554
555 static void pnv_xive2_nxc_cache_load(PnvXive2 *xive, uint8_t watch_engine)
556 {
557 uint8_t blk, nxc_type;
558 uint32_t idx, table_type = -1;
559 uint64_t nxc_watch[4] = { 0 };
560 int i, spec_reg, data_reg;
561
562 assert(watch_engine < ARRAY_SIZE(nxc_watch));
563
564 spec_reg = (PC_NXC_WATCH0_SPEC + watch_engine * 0x40) >> 3;
565 data_reg = (PC_NXC_WATCH0_DATA0 + watch_engine * 0x40) >> 3;
566 nxc_type = GETFIELD(PC_NXC_WATCH_NXC_TYPE, xive->pc_regs[spec_reg]);
567 blk = GETFIELD(PC_NXC_WATCH_BLOCK_ID, xive->pc_regs[spec_reg]);
568 idx = GETFIELD(PC_NXC_WATCH_INDEX, xive->pc_regs[spec_reg]);
569
570 assert(!pnv_xive2_nxc_to_table_type(nxc_type, &table_type));
571
572 if (pnv_xive2_vst_read(xive, table_type, blk, idx, nxc_watch)) {
573 xive2_error(xive, "VST: no NXC entry %x/%x in %s table!?",
574 blk, idx, vst_infos[table_type].name);
575 }
576
577 for (i = 0; i < ARRAY_SIZE(nxc_watch); i++) {
578 xive->pc_regs[data_reg + i] = be64_to_cpu(nxc_watch[i]);
579 }
580 }
581
582 static int pnv_xive2_get_eas(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
583 Xive2Eas *eas)
584 {
585 PnvXive2 *xive = PNV_XIVE2(xrtr);
586
587 if (pnv_xive2_block_id(xive) != blk) {
588 xive2_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
589 return -1;
590 }
591
592 return pnv_xive2_vst_read(xive, VST_EAS, blk, idx, eas);
593 }
594
595 static uint32_t pnv_xive2_get_config(Xive2Router *xrtr)
596 {
597 PnvXive2 *xive = PNV_XIVE2(xrtr);
598 uint32_t cfg = 0;
599
600 if (xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS) {
601 cfg |= XIVE2_GEN1_TIMA_OS;
602 }
603
604 if (xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_EN_VP_SAVE_RESTORE) {
605 cfg |= XIVE2_VP_SAVE_RESTORE;
606 }
607
608 if (GETFIELD(CQ_XIVE_CFG_HYP_HARD_RANGE,
609 xive->cq_regs[CQ_XIVE_CFG >> 3]) == CQ_XIVE_CFG_THREADID_8BITS) {
610 cfg |= XIVE2_THREADID_8BITS;
611 }
612
613 return cfg;
614 }
615
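/*
 * A thread is enabled when its bit is set in TCTXT_EN0 (fused cores
 * 0-7) or TCTXT_EN1 (fused cores 8 and above).
 */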
616 static bool pnv_xive2_is_cpu_enabled(PnvXive2 *xive, PowerPCCPU *cpu)
617 {
618 int pir = ppc_cpu_pir(cpu);
619 uint32_t fc = PNV10_PIR2FUSEDCORE(pir);
620 uint64_t reg = fc < 8 ? TCTXT_EN0 : TCTXT_EN1;
621 uint32_t bit = pir & 0x3f;
622
623 return xive->tctxt_regs[reg >> 3] & PPC_BIT(bit);
624 }
625
626 static int pnv_xive2_match_nvt(XivePresenter *xptr, uint8_t format,
627 uint8_t nvt_blk, uint32_t nvt_idx,
628 bool cam_ignore, uint8_t priority,
629 uint32_t logic_serv, XiveTCTXMatch *match)
630 {
631 PnvXive2 *xive = PNV_XIVE2(xptr);
632 PnvChip *chip = xive->chip;
633 int count = 0;
634 int i, j;
635 bool gen1_tima_os =
636 xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS;
637
638 for (i = 0; i < chip->nr_cores; i++) {
639 PnvCore *pc = chip->cores[i];
640 CPUCore *cc = CPU_CORE(pc);
641
642 for (j = 0; j < cc->nr_threads; j++) {
643 PowerPCCPU *cpu = pc->threads[j];
644 XiveTCTX *tctx;
645 int ring;
646
647 if (!pnv_xive2_is_cpu_enabled(xive, cpu)) {
648 continue;
649 }
650
651 tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
652
653 if (gen1_tima_os) {
654 ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk,
655 nvt_idx, cam_ignore,
656 logic_serv);
657 } else {
658 ring = xive2_presenter_tctx_match(xptr, tctx, format, nvt_blk,
659 nvt_idx, cam_ignore,
660 logic_serv);
661 }
662
663 /*
664              * Save the context and carry on to catch duplicates,
665              * which we don't support yet.
666 */
667 if (ring != -1) {
668 if (match->tctx) {
669 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a "
670 "thread context NVT %x/%x\n",
671 nvt_blk, nvt_idx);
672 return false;
673 }
674
675 match->ring = ring;
676 match->tctx = tctx;
677 count++;
678 }
679 }
680 }
681
682 return count;
683 }
684
685 static uint32_t pnv_xive2_presenter_get_config(XivePresenter *xptr)
686 {
687 PnvXive2 *xive = PNV_XIVE2(xptr);
688 uint32_t cfg = 0;
689
690 if (xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS) {
691 cfg |= XIVE_PRESENTER_GEN1_TIMA_OS;
692 }
693 return cfg;
694 }
695
696 static uint8_t pnv_xive2_get_block_id(Xive2Router *xrtr)
697 {
698 return pnv_xive2_block_id(PNV_XIVE2(xrtr));
699 }
700
701 /*
702  * The TIMA MMIO space is shared among the chips. To identify the
703 * chip from which the access is being done, we extract the chip id
704 * from the PIR.
705 */
706 static PnvXive2 *pnv_xive2_tm_get_xive(PowerPCCPU *cpu)
707 {
708 int pir = ppc_cpu_pir(cpu);
709 XivePresenter *xptr = XIVE_TCTX(pnv_cpu_state(cpu)->intc)->xptr;
710 PnvXive2 *xive = PNV_XIVE2(xptr);
711
712 if (!pnv_xive2_is_cpu_enabled(xive, cpu)) {
713 xive2_error(xive, "IC: CPU %x is not enabled", pir);
714 }
715 return xive;
716 }
717
718 /*
719 * The internal sources of the interrupt controller have no knowledge
720 * of the XIVE2 chip on which they reside. Encode the block id in the
721 * source interrupt number before forwarding the source event
722 * notification to the Router. This is required on a multichip system.
723 */
724 static void pnv_xive2_notify(XiveNotifier *xn, uint32_t srcno, bool pq_checked)
725 {
726 PnvXive2 *xive = PNV_XIVE2(xn);
727 uint8_t blk = pnv_xive2_block_id(xive);
728
729 xive2_router_notify(xn, XIVE_EAS(blk, srcno), pq_checked);
730 }
731
732 /*
733 * Set Translation Tables
734 *
735 * TODO add support for multiple sets
736 */
737 static int pnv_xive2_stt_set_data(PnvXive2 *xive, uint64_t val)
738 {
739 uint8_t tsel = GETFIELD(CQ_TAR_SELECT, xive->cq_regs[CQ_TAR >> 3]);
740 uint8_t entry = GETFIELD(CQ_TAR_ENTRY_SELECT,
741 xive->cq_regs[CQ_TAR >> 3]);
742
743 switch (tsel) {
744 case CQ_TAR_NVPG:
745 case CQ_TAR_ESB:
746 case CQ_TAR_END:
747 case CQ_TAR_NVC:
748 xive->tables[tsel][entry] = val;
749 break;
750 default:
751 xive2_error(xive, "IC: unsupported table %d", tsel);
752 return -1;
753 }
754
755 if (xive->cq_regs[CQ_TAR >> 3] & CQ_TAR_AUTOINC) {
756 xive->cq_regs[CQ_TAR >> 3] = SETFIELD(CQ_TAR_ENTRY_SELECT,
757 xive->cq_regs[CQ_TAR >> 3], ++entry);
758 }
759
760 return 0;
761 }
762 /*
763 * Virtual Structure Tables (VST) configuration
764 */
765 static void pnv_xive2_vst_set_exclusive(PnvXive2 *xive, uint8_t type,
766 uint8_t blk, uint64_t vsd)
767 {
768 Xive2EndSource *end_xsrc = &xive->end_source;
769 XiveSource *xsrc = &xive->ipi_source;
770 const XiveVstInfo *info = &vst_infos[type];
771 uint32_t page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
772 uint64_t vst_tsize = 1ull << page_shift;
773 uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
774
775 /* Basic checks */
776
777 if (VSD_INDIRECT & vsd) {
778 if (!pnv_xive2_vst_page_size_allowed(page_shift)) {
779 xive2_error(xive, "VST: invalid %s page shift %d", info->name,
780 page_shift);
781 return;
782 }
783 }
784
785 if (!QEMU_IS_ALIGNED(vst_addr, 1ull << page_shift)) {
786 xive2_error(xive, "VST: %s table address 0x%"PRIx64
787 " is not aligned with page shift %d",
788 info->name, vst_addr, page_shift);
789 return;
790 }
791
792 /* Record the table configuration (in SRAM on HW) */
793 xive->vsds[type][blk] = vsd;
794
795 /* Now tune the models with the configuration provided by the FW */
796
797 switch (type) {
798 case VST_ESB:
799 /*
800 * Backing store pages for the source PQ bits. The model does
801 * not use these PQ bits backed in RAM because the XiveSource
802 * model has its own.
803 *
804 * If the table is direct, we can compute the number of PQ
805 * entries provisioned by FW (such as skiboot) and resize the
806 * ESB window accordingly.
807 */
808 if (memory_region_is_mapped(&xsrc->esb_mmio)) {
809 memory_region_del_subregion(&xive->esb_mmio, &xsrc->esb_mmio);
810 }
811 if (!(VSD_INDIRECT & vsd)) {
812 memory_region_set_size(&xsrc->esb_mmio, vst_tsize * SBE_PER_BYTE
813 * (1ull << xsrc->esb_shift));
814 }
815
816 memory_region_add_subregion(&xive->esb_mmio, 0, &xsrc->esb_mmio);
817 break;
818
819 case VST_EAS: /* Nothing to be done */
820 break;
821
822 case VST_END:
823 /*
824 * Backing store pages for the END.
825 */
826 if (memory_region_is_mapped(&end_xsrc->esb_mmio)) {
827 memory_region_del_subregion(&xive->end_mmio, &end_xsrc->esb_mmio);
828 }
829 if (!(VSD_INDIRECT & vsd)) {
830 memory_region_set_size(&end_xsrc->esb_mmio, (vst_tsize / info->size)
831 * (1ull << end_xsrc->esb_shift));
832 }
833 memory_region_add_subregion(&xive->end_mmio, 0, &end_xsrc->esb_mmio);
834 break;
835
836 case VST_NVP: /* Not modeled */
837 case VST_NVG: /* Not modeled */
838 case VST_NVC: /* Not modeled */
839 case VST_IC: /* Not modeled */
840 case VST_SYNC: /* Not modeled */
841 case VST_ERQ: /* Not modeled */
842 break;
843
844 default:
845 g_assert_not_reached();
846 }
847 }
848
849 /*
850  * Both the PC and VC sub-engines are configured this way, as each
851  * uses the Virtual Structure Tables.
852 */
853 static void pnv_xive2_vst_set_data(PnvXive2 *xive, uint64_t vsd,
854 uint8_t type, uint8_t blk)
855 {
856 uint8_t mode = GETFIELD(VSD_MODE, vsd);
857 uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
858
859 if (type > VST_ERQ) {
860 xive2_error(xive, "VST: invalid table type %d", type);
861 return;
862 }
863
864 if (blk >= vst_infos[type].max_blocks) {
865 xive2_error(xive, "VST: invalid block id %d for"
866 " %s table", blk, vst_infos[type].name);
867 return;
868 }
869
870 if (!vst_addr) {
871 xive2_error(xive, "VST: invalid %s table address",
872 vst_infos[type].name);
873 return;
874 }
875
876 switch (mode) {
877 case VSD_MODE_FORWARD:
878 xive->vsds[type][blk] = vsd;
879 break;
880
881 case VSD_MODE_EXCLUSIVE:
882 pnv_xive2_vst_set_exclusive(xive, type, blk, vsd);
883 break;
884
885 default:
886 xive2_error(xive, "VST: unsupported table mode %d", mode);
887 return;
888 }
889 }
890
891 static void pnv_xive2_vc_vst_set_data(PnvXive2 *xive, uint64_t vsd)
892 {
893 uint8_t type = GETFIELD(VC_VSD_TABLE_SELECT,
894 xive->vc_regs[VC_VSD_TABLE_ADDR >> 3]);
895 uint8_t blk = GETFIELD(VC_VSD_TABLE_ADDRESS,
896 xive->vc_regs[VC_VSD_TABLE_ADDR >> 3]);
897
898 pnv_xive2_vst_set_data(xive, vsd, type, blk);
899 }
900
901 /*
902 * MMIO handlers
903 */
904
905
906 /*
907 * IC BAR layout
908 *
909 * Page 0: Internal CQ register accesses (reads & writes)
910 * Page 1: Internal PC register accesses (reads & writes)
911 * Page 2: Internal VC register accesses (reads & writes)
912 * Page 3: Internal TCTXT (TIMA) reg accesses (read & writes)
913 * Page 4: Notify Port page (writes only, w/data),
914 * Page 5: Reserved
915 * Page 6: Sync Poll page (writes only, dataless)
916 * Page 7: Sync Inject page (writes only, dataless)
917 * Page 8: LSI Trigger page (writes only, dataless)
918 * Page 9: LSI SB Management page (reads & writes dataless)
919 * Pages 10-255: Reserved
920 * Pages 256-383: Direct mapped Thread Context Area (reads & writes)
921 * covering the 128 threads in P10.
922 * Pages 384-511: Reserved
923 */
924 typedef struct PnvXive2Region {
925 const char *name;
926 uint32_t pgoff;
927 uint32_t pgsize;
928 const MemoryRegionOps *ops;
929 } PnvXive2Region;
930
931 static const MemoryRegionOps pnv_xive2_ic_cq_ops;
932 static const MemoryRegionOps pnv_xive2_ic_pc_ops;
933 static const MemoryRegionOps pnv_xive2_ic_vc_ops;
934 static const MemoryRegionOps pnv_xive2_ic_tctxt_ops;
935 static const MemoryRegionOps pnv_xive2_ic_notify_ops;
936 static const MemoryRegionOps pnv_xive2_ic_sync_ops;
937 static const MemoryRegionOps pnv_xive2_ic_lsi_ops;
938 static const MemoryRegionOps pnv_xive2_ic_tm_indirect_ops;
939
940 /* 512 pages. 4K: 2M range, 64K: 32M range */
941 static const PnvXive2Region pnv_xive2_ic_regions[] = {
942 { "xive-ic-cq", 0, 1, &pnv_xive2_ic_cq_ops },
943 { "xive-ic-vc", 1, 1, &pnv_xive2_ic_vc_ops },
944 { "xive-ic-pc", 2, 1, &pnv_xive2_ic_pc_ops },
945 { "xive-ic-tctxt", 3, 1, &pnv_xive2_ic_tctxt_ops },
946 { "xive-ic-notify", 4, 1, &pnv_xive2_ic_notify_ops },
947 /* page 5 reserved */
948 { "xive-ic-sync", 6, 2, &pnv_xive2_ic_sync_ops },
949 { "xive-ic-lsi", 8, 2, &pnv_xive2_ic_lsi_ops },
950 /* pages 10-255 reserved */
951 { "xive-ic-tm-indirect", 256, 128, &pnv_xive2_ic_tm_indirect_ops },
952 /* pages 384-511 reserved */
953 };
954
955 /*
956 * CQ operations
957 */
958
959 static uint64_t pnv_xive2_ic_cq_read(void *opaque, hwaddr offset,
960 unsigned size)
961 {
962 PnvXive2 *xive = PNV_XIVE2(opaque);
963 uint32_t reg = offset >> 3;
964 uint64_t val = 0;
965
966 switch (offset) {
967 case CQ_XIVE_CAP: /* Set at reset */
968 case CQ_XIVE_CFG:
969 val = xive->cq_regs[reg];
970 break;
971 case CQ_MSGSND: /* TODO check the #cores of the machine */
972 val = 0xffffffff00000000;
973 break;
974 case CQ_CFG_PB_GEN:
975 val = CQ_CFG_PB_GEN_PB_INIT; /* TODO: fix CQ_CFG_PB_GEN default value */
976 break;
977 default:
978 xive2_error(xive, "CQ: invalid read @%"HWADDR_PRIx, offset);
979 }
980
981 return val;
982 }
983
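/* BAR sizes are encoded as a power of two, starting at 16MB (2^24) */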
984 static uint64_t pnv_xive2_bar_size(uint64_t val)
985 {
986 return 1ull << (GETFIELD(CQ_BAR_RANGE, val) + 24);
987 }
988
989 static void pnv_xive2_ic_cq_write(void *opaque, hwaddr offset,
990 uint64_t val, unsigned size)
991 {
992 PnvXive2 *xive = PNV_XIVE2(opaque);
993 MemoryRegion *sysmem = get_system_memory();
994 uint32_t reg = offset >> 3;
995 int i;
996
997 switch (offset) {
998 case CQ_XIVE_CFG:
999 case CQ_RST_CTL: /* TODO: reset all BARs */
1000 break;
1001
1002 case CQ_IC_BAR:
1003 xive->ic_shift = val & CQ_IC_BAR_64K ? 16 : 12;
1004 if (!(val & CQ_IC_BAR_VALID)) {
1005 xive->ic_base = 0;
1006 if (xive->cq_regs[reg] & CQ_IC_BAR_VALID) {
1007 for (i = 0; i < ARRAY_SIZE(xive->ic_mmios); i++) {
1008 memory_region_del_subregion(&xive->ic_mmio,
1009 &xive->ic_mmios[i]);
1010 }
1011 memory_region_del_subregion(sysmem, &xive->ic_mmio);
1012 }
1013 } else {
1014 xive->ic_base = val & ~(CQ_IC_BAR_VALID | CQ_IC_BAR_64K);
1015 if (!(xive->cq_regs[reg] & CQ_IC_BAR_VALID)) {
1016 for (i = 0; i < ARRAY_SIZE(xive->ic_mmios); i++) {
1017 memory_region_add_subregion(&xive->ic_mmio,
1018 pnv_xive2_ic_regions[i].pgoff << xive->ic_shift,
1019 &xive->ic_mmios[i]);
1020 }
1021 memory_region_add_subregion(sysmem, xive->ic_base,
1022 &xive->ic_mmio);
1023 }
1024 }
1025 break;
1026
1027 case CQ_TM_BAR:
1028 xive->tm_shift = val & CQ_TM_BAR_64K ? 16 : 12;
1029 if (!(val & CQ_TM_BAR_VALID)) {
1030 xive->tm_base = 0;
1031 if (xive->cq_regs[reg] & CQ_TM_BAR_VALID) {
1032 memory_region_del_subregion(sysmem, &xive->tm_mmio);
1033 }
1034 } else {
1035 xive->tm_base = val & ~(CQ_TM_BAR_VALID | CQ_TM_BAR_64K);
1036 if (!(xive->cq_regs[reg] & CQ_TM_BAR_VALID)) {
1037 memory_region_add_subregion(sysmem, xive->tm_base,
1038 &xive->tm_mmio);
1039 }
1040 }
1041 break;
1042
1043 case CQ_ESB_BAR:
1044 xive->esb_shift = val & CQ_BAR_64K ? 16 : 12;
1045 if (!(val & CQ_BAR_VALID)) {
1046 xive->esb_base = 0;
1047 if (xive->cq_regs[reg] & CQ_BAR_VALID) {
1048 memory_region_del_subregion(sysmem, &xive->esb_mmio);
1049 }
1050 } else {
1051 xive->esb_base = val & CQ_BAR_ADDR;
1052 if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
1053 memory_region_set_size(&xive->esb_mmio,
1054 pnv_xive2_bar_size(val));
1055 memory_region_add_subregion(sysmem, xive->esb_base,
1056 &xive->esb_mmio);
1057 }
1058 }
1059 break;
1060
1061 case CQ_END_BAR:
1062 xive->end_shift = val & CQ_BAR_64K ? 16 : 12;
1063 if (!(val & CQ_BAR_VALID)) {
1064 xive->end_base = 0;
1065 if (xive->cq_regs[reg] & CQ_BAR_VALID) {
1066 memory_region_del_subregion(sysmem, &xive->end_mmio);
1067 }
1068 } else {
1069 xive->end_base = val & CQ_BAR_ADDR;
1070 if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
1071 memory_region_set_size(&xive->end_mmio,
1072 pnv_xive2_bar_size(val));
1073 memory_region_add_subregion(sysmem, xive->end_base,
1074 &xive->end_mmio);
1075 }
1076 }
1077 break;
1078
1079 case CQ_NVC_BAR:
1080 xive->nvc_shift = val & CQ_BAR_64K ? 16 : 12;
1081 if (!(val & CQ_BAR_VALID)) {
1082 xive->nvc_base = 0;
1083 if (xive->cq_regs[reg] & CQ_BAR_VALID) {
1084 memory_region_del_subregion(sysmem, &xive->nvc_mmio);
1085 }
1086 } else {
1087 xive->nvc_base = val & CQ_BAR_ADDR;
1088 if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
1089 memory_region_set_size(&xive->nvc_mmio,
1090 pnv_xive2_bar_size(val));
1091 memory_region_add_subregion(sysmem, xive->nvc_base,
1092 &xive->nvc_mmio);
1093 }
1094 }
1095 break;
1096
1097 case CQ_NVPG_BAR:
1098 xive->nvpg_shift = val & CQ_BAR_64K ? 16 : 12;
1099 if (!(val & CQ_BAR_VALID)) {
1100 xive->nvpg_base = 0;
1101 if (xive->cq_regs[reg] & CQ_BAR_VALID) {
1102 memory_region_del_subregion(sysmem, &xive->nvpg_mmio);
1103 }
1104 } else {
1105 xive->nvpg_base = val & CQ_BAR_ADDR;
1106 if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
1107 memory_region_set_size(&xive->nvpg_mmio,
1108 pnv_xive2_bar_size(val));
1109 memory_region_add_subregion(sysmem, xive->nvpg_base,
1110 &xive->nvpg_mmio);
1111 }
1112 }
1113 break;
1114
1115 case CQ_TAR: /* Set Translation Table Address */
1116 break;
1117 case CQ_TDR: /* Set Translation Table Data */
1118 pnv_xive2_stt_set_data(xive, val);
1119 break;
1120 case CQ_FIRMASK_OR: /* FIR error reporting */
1121 break;
1122 default:
1123 xive2_error(xive, "CQ: invalid write 0x%"HWADDR_PRIx, offset);
1124 return;
1125 }
1126
1127 xive->cq_regs[reg] = val;
1128 }
1129
1130 static const MemoryRegionOps pnv_xive2_ic_cq_ops = {
1131 .read = pnv_xive2_ic_cq_read,
1132 .write = pnv_xive2_ic_cq_write,
1133 .endianness = DEVICE_BIG_ENDIAN,
1134 .valid = {
1135 .min_access_size = 8,
1136 .max_access_size = 8,
1137 },
1138 .impl = {
1139 .min_access_size = 8,
1140 .max_access_size = 8,
1141 },
1142 };
1143
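/*
 * The busy state of watch engine N is tracked by bit (3 - N) of the
 * state word. Scan engines 0 to 3 and return the first one that is
 * both enabled in @engine_mask and currently free, or 0xFF if none.
 */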
1144 static uint8_t pnv_xive2_cache_watch_assign(uint64_t engine_mask,
1145 uint64_t *state)
1146 {
1147 uint8_t val = 0xFF;
1148 int i;
1149
1150 for (i = 3; i >= 0; i--) {
1151 if (BIT(i) & engine_mask) {
1152 if (!(BIT(i) & *state)) {
1153 *state |= BIT(i);
1154 val = 3 - i;
1155 break;
1156 }
1157 }
1158 }
1159 return val;
1160 }
1161
1162 static void pnv_xive2_cache_watch_release(uint64_t *state, uint8_t watch_engine)
1163 {
1164 uint8_t engine_bit = 3 - watch_engine;
1165
1166 if (*state & BIT(engine_bit)) {
1167 *state &= ~BIT(engine_bit);
1168 }
1169 }
1170
1171 static uint8_t pnv_xive2_endc_cache_watch_assign(PnvXive2 *xive)
1172 {
1173 uint64_t engine_mask = GETFIELD(VC_ENDC_CFG_CACHE_WATCH_ASSIGN,
1174 xive->vc_regs[VC_ENDC_CFG >> 3]);
1175 uint64_t state = xive->vc_regs[VC_ENDC_WATCH_ASSIGN >> 3];
1176 uint8_t val;
1177
1178 /*
1179 * We keep track of which engines are currently busy in the
1180 * VC_ENDC_WATCH_ASSIGN register directly. When the firmware reads
1181 * the register, we don't return its value but the ID of an engine
1182 * it can use.
1183 * There are 4 engines. 0xFF means no engine is available.
1184 */
1185 val = pnv_xive2_cache_watch_assign(engine_mask, &state);
1186 if (val != 0xFF) {
1187 xive->vc_regs[VC_ENDC_WATCH_ASSIGN >> 3] = state;
1188 }
1189 return val;
1190 }
1191
1192 static void pnv_xive2_endc_cache_watch_release(PnvXive2 *xive,
1193 uint8_t watch_engine)
1194 {
1195 uint64_t state = xive->vc_regs[VC_ENDC_WATCH_ASSIGN >> 3];
1196
1197 pnv_xive2_cache_watch_release(&state, watch_engine);
1198 xive->vc_regs[VC_ENDC_WATCH_ASSIGN >> 3] = state;
1199 }
1200
1201 static uint64_t pnv_xive2_ic_vc_read(void *opaque, hwaddr offset,
1202 unsigned size)
1203 {
1204 PnvXive2 *xive = PNV_XIVE2(opaque);
1205 uint64_t val = 0;
1206 uint32_t reg = offset >> 3;
1207 uint8_t watch_engine;
1208
1209 switch (offset) {
1210 /*
1211 * VSD table settings.
1212 */
1213 case VC_VSD_TABLE_ADDR:
1214 case VC_VSD_TABLE_DATA:
1215 val = xive->vc_regs[reg];
1216 break;
1217
1218 /*
1219 * ESB cache updates (not modeled)
1220 */
1221 case VC_ESBC_FLUSH_CTRL:
1222 xive->vc_regs[reg] &= ~VC_ESBC_FLUSH_CTRL_POLL_VALID;
1223 val = xive->vc_regs[reg];
1224 break;
1225
1226 case VC_ESBC_CFG:
1227 val = xive->vc_regs[reg];
1228 break;
1229
1230 /*
1231 * EAS cache updates (not modeled)
1232 */
1233 case VC_EASC_FLUSH_CTRL:
1234 xive->vc_regs[reg] &= ~VC_EASC_FLUSH_CTRL_POLL_VALID;
1235 val = xive->vc_regs[reg];
1236 break;
1237
1238 case VC_ENDC_WATCH_ASSIGN:
1239 val = pnv_xive2_endc_cache_watch_assign(xive);
1240 break;
1241
1242 case VC_ENDC_CFG:
1243 val = xive->vc_regs[reg];
1244 break;
1245
1246 /*
1247 * END cache updates
1248 */
1249 case VC_ENDC_WATCH0_SPEC:
1250 case VC_ENDC_WATCH1_SPEC:
1251 case VC_ENDC_WATCH2_SPEC:
1252 case VC_ENDC_WATCH3_SPEC:
1253 watch_engine = (offset - VC_ENDC_WATCH0_SPEC) >> 6;
1254 xive->vc_regs[reg] &= ~(VC_ENDC_WATCH_FULL | VC_ENDC_WATCH_CONFLICT);
1255 pnv_xive2_endc_cache_watch_release(xive, watch_engine);
1256 val = xive->vc_regs[reg];
1257 break;
1258
1259 case VC_ENDC_WATCH0_DATA0:
1260 case VC_ENDC_WATCH1_DATA0:
1261 case VC_ENDC_WATCH2_DATA0:
1262 case VC_ENDC_WATCH3_DATA0:
1263 /*
1264 * Load DATA registers from cache with data requested by the
1265 * SPEC register
1266 */
1267 watch_engine = (offset - VC_ENDC_WATCH0_DATA0) >> 6;
1268 pnv_xive2_end_cache_load(xive, watch_engine);
1269 val = xive->vc_regs[reg];
1270 break;
1271
1272 case VC_ENDC_WATCH0_DATA1 ... VC_ENDC_WATCH0_DATA3:
1273 case VC_ENDC_WATCH1_DATA1 ... VC_ENDC_WATCH1_DATA3:
1274 case VC_ENDC_WATCH2_DATA1 ... VC_ENDC_WATCH2_DATA3:
1275 case VC_ENDC_WATCH3_DATA1 ... VC_ENDC_WATCH3_DATA3:
1276 val = xive->vc_regs[reg];
1277 break;
1278
1279 case VC_ENDC_FLUSH_CTRL:
1280 xive->vc_regs[reg] &= ~VC_ENDC_FLUSH_CTRL_POLL_VALID;
1281 val = xive->vc_regs[reg];
1282 break;
1283
1284 /*
1285 * Indirect invalidation
1286 */
1287 case VC_AT_MACRO_KILL_MASK:
1288 val = xive->vc_regs[reg];
1289 break;
1290
1291 case VC_AT_MACRO_KILL:
1292 xive->vc_regs[reg] &= ~VC_AT_MACRO_KILL_VALID;
1293 val = xive->vc_regs[reg];
1294 break;
1295
1296 /*
1297 * Interrupt fifo overflow in memory backing store (Not modeled)
1298 */
1299 case VC_QUEUES_CFG_REM0 ... VC_QUEUES_CFG_REM6:
1300 val = xive->vc_regs[reg];
1301 break;
1302
1303 /*
1304 * Synchronisation
1305 */
1306 case VC_ENDC_SYNC_DONE:
1307 val = VC_ENDC_SYNC_POLL_DONE;
1308 break;
1309 default:
1310 xive2_error(xive, "VC: invalid read @%"HWADDR_PRIx, offset);
1311 }
1312
1313 return val;
1314 }
1315
1316 static void pnv_xive2_ic_vc_write(void *opaque, hwaddr offset,
1317 uint64_t val, unsigned size)
1318 {
1319 PnvXive2 *xive = PNV_XIVE2(opaque);
1320 uint32_t reg = offset >> 3;
1321 uint8_t watch_engine;
1322
1323 switch (offset) {
1324 /*
1325 * VSD table settings.
1326 */
1327 case VC_VSD_TABLE_ADDR:
1328 break;
1329 case VC_VSD_TABLE_DATA:
1330 pnv_xive2_vc_vst_set_data(xive, val);
1331 break;
1332
1333 /*
1334 * ESB cache updates (not modeled)
1335 */
1336 /* case VC_ESBC_FLUSH_CTRL: */
1337 case VC_ESBC_FLUSH_POLL:
1338 xive->vc_regs[VC_ESBC_FLUSH_CTRL >> 3] |= VC_ESBC_FLUSH_CTRL_POLL_VALID;
1339 /* ESB update */
1340 break;
1341
1342 case VC_ESBC_FLUSH_INJECT:
1343 pnv_xive2_inject_notify(xive, PNV_XIVE2_CACHE_ESBC);
1344 break;
1345
1346 case VC_ESBC_CFG:
1347 break;
1348
1349 /*
1350 * EAS cache updates (not modeled)
1351 */
1352 /* case VC_EASC_FLUSH_CTRL: */
1353 case VC_EASC_FLUSH_POLL:
1354 xive->vc_regs[VC_EASC_FLUSH_CTRL >> 3] |= VC_EASC_FLUSH_CTRL_POLL_VALID;
1355 /* EAS update */
1356 break;
1357
1358 case VC_EASC_FLUSH_INJECT:
1359 pnv_xive2_inject_notify(xive, PNV_XIVE2_CACHE_EASC);
1360 break;
1361
1362 case VC_ENDC_CFG:
1363 break;
1364
1365 /*
1366 * END cache updates
1367 */
1368 case VC_ENDC_WATCH0_SPEC:
1369 case VC_ENDC_WATCH1_SPEC:
1370 case VC_ENDC_WATCH2_SPEC:
1371 case VC_ENDC_WATCH3_SPEC:
1372 val &= ~VC_ENDC_WATCH_CONFLICT; /* HW will set this bit */
1373 break;
1374
1375 case VC_ENDC_WATCH0_DATA1 ... VC_ENDC_WATCH0_DATA3:
1376 case VC_ENDC_WATCH1_DATA1 ... VC_ENDC_WATCH1_DATA3:
1377 case VC_ENDC_WATCH2_DATA1 ... VC_ENDC_WATCH2_DATA3:
1378 case VC_ENDC_WATCH3_DATA1 ... VC_ENDC_WATCH3_DATA3:
1379 break;
1380 case VC_ENDC_WATCH0_DATA0:
1381 case VC_ENDC_WATCH1_DATA0:
1382 case VC_ENDC_WATCH2_DATA0:
1383 case VC_ENDC_WATCH3_DATA0:
1384 /* writing to DATA0 triggers the cache write */
1385 watch_engine = (offset - VC_ENDC_WATCH0_DATA0) >> 6;
1386 xive->vc_regs[reg] = val;
1387 pnv_xive2_end_update(xive, watch_engine);
1388 break;
1389
1390
1391 /* case VC_ENDC_FLUSH_CTRL: */
1392 case VC_ENDC_FLUSH_POLL:
1393 xive->vc_regs[VC_ENDC_FLUSH_CTRL >> 3] |= VC_ENDC_FLUSH_CTRL_POLL_VALID;
1394 break;
1395
1396 case VC_ENDC_FLUSH_INJECT:
1397 pnv_xive2_inject_notify(xive, PNV_XIVE2_CACHE_ENDC);
1398 break;
1399
1400 /*
1401 * Indirect invalidation
1402 */
1403 case VC_AT_MACRO_KILL:
1404 case VC_AT_MACRO_KILL_MASK:
1405 break;
1406
1407 /*
1408 * Interrupt fifo overflow in memory backing store (Not modeled)
1409 */
1410 case VC_QUEUES_CFG_REM0 ... VC_QUEUES_CFG_REM6:
1411 break;
1412
1413 /*
1414 * Synchronisation
1415 */
1416 case VC_ENDC_SYNC_DONE:
1417 break;
1418
1419 default:
1420 xive2_error(xive, "VC: invalid write @%"HWADDR_PRIx, offset);
1421 return;
1422 }
1423
1424 xive->vc_regs[reg] = val;
1425 }
1426
1427 static const MemoryRegionOps pnv_xive2_ic_vc_ops = {
1428 .read = pnv_xive2_ic_vc_read,
1429 .write = pnv_xive2_ic_vc_write,
1430 .endianness = DEVICE_BIG_ENDIAN,
1431 .valid = {
1432 .min_access_size = 8,
1433 .max_access_size = 8,
1434 },
1435 .impl = {
1436 .min_access_size = 8,
1437 .max_access_size = 8,
1438 },
1439 };
1440
1441 static uint8_t pnv_xive2_nxc_cache_watch_assign(PnvXive2 *xive)
1442 {
1443 uint64_t engine_mask = GETFIELD(PC_NXC_PROC_CONFIG_WATCH_ASSIGN,
1444 xive->pc_regs[PC_NXC_PROC_CONFIG >> 3]);
1445 uint64_t state = xive->pc_regs[PC_NXC_WATCH_ASSIGN >> 3];
1446 uint8_t val;
1447
1448 /*
1449 * We keep track of which engines are currently busy in the
1450 * PC_NXC_WATCH_ASSIGN register directly. When the firmware reads
1451 * the register, we don't return its value but the ID of an engine
1452 * it can use.
1453 * There are 4 engines. 0xFF means no engine is available.
1454 */
1455 val = pnv_xive2_cache_watch_assign(engine_mask, &state);
1456 if (val != 0xFF) {
1457 xive->pc_regs[PC_NXC_WATCH_ASSIGN >> 3] = state;
1458 }
1459 return val;
1460 }
1461
1462 static void pnv_xive2_nxc_cache_watch_release(PnvXive2 *xive,
1463 uint8_t watch_engine)
1464 {
1465 uint64_t state = xive->pc_regs[PC_NXC_WATCH_ASSIGN >> 3];
1466
1467 pnv_xive2_cache_watch_release(&state, watch_engine);
1468 xive->pc_regs[PC_NXC_WATCH_ASSIGN >> 3] = state;
1469 }
1470
1471 static uint64_t pnv_xive2_ic_pc_read(void *opaque, hwaddr offset,
1472 unsigned size)
1473 {
1474 PnvXive2 *xive = PNV_XIVE2(opaque);
1475 uint64_t val = -1;
1476 uint32_t reg = offset >> 3;
1477 uint8_t watch_engine;
1478
1479 switch (offset) {
1480 /*
1481 * VSD table settings.
1482 */
1483 case PC_VSD_TABLE_ADDR:
1484 case PC_VSD_TABLE_DATA:
1485 val = xive->pc_regs[reg];
1486 break;
1487
1488 case PC_NXC_WATCH_ASSIGN:
1489 val = pnv_xive2_nxc_cache_watch_assign(xive);
1490 break;
1491
1492 case PC_NXC_PROC_CONFIG:
1493 val = xive->pc_regs[reg];
1494 break;
1495
1496 /*
1497 * cache updates
1498 */
1499 case PC_NXC_WATCH0_SPEC:
1500 case PC_NXC_WATCH1_SPEC:
1501 case PC_NXC_WATCH2_SPEC:
1502 case PC_NXC_WATCH3_SPEC:
1503 watch_engine = (offset - PC_NXC_WATCH0_SPEC) >> 6;
1504 xive->pc_regs[reg] &= ~(PC_NXC_WATCH_FULL | PC_NXC_WATCH_CONFLICT);
1505 pnv_xive2_nxc_cache_watch_release(xive, watch_engine);
1506 val = xive->pc_regs[reg];
1507 break;
1508
1509 case PC_NXC_WATCH0_DATA0:
1510 case PC_NXC_WATCH1_DATA0:
1511 case PC_NXC_WATCH2_DATA0:
1512 case PC_NXC_WATCH3_DATA0:
1513 /*
1514 * Load DATA registers from cache with data requested by the
1515 * SPEC register
1516 */
1517 watch_engine = (offset - PC_NXC_WATCH0_DATA0) >> 6;
1518 pnv_xive2_nxc_cache_load(xive, watch_engine);
1519 val = xive->pc_regs[reg];
1520 break;
1521
1522 case PC_NXC_WATCH0_DATA1 ... PC_NXC_WATCH0_DATA3:
1523 case PC_NXC_WATCH1_DATA1 ... PC_NXC_WATCH1_DATA3:
1524 case PC_NXC_WATCH2_DATA1 ... PC_NXC_WATCH2_DATA3:
1525 case PC_NXC_WATCH3_DATA1 ... PC_NXC_WATCH3_DATA3:
1526 val = xive->pc_regs[reg];
1527 break;
1528
1529 case PC_NXC_FLUSH_CTRL:
1530 xive->pc_regs[reg] &= ~PC_NXC_FLUSH_CTRL_POLL_VALID;
1531 val = xive->pc_regs[reg];
1532 break;
1533
1534 /*
1535 * Indirect invalidation
1536 */
1537 case PC_AT_KILL:
1538 xive->pc_regs[reg] &= ~PC_AT_KILL_VALID;
1539 val = xive->pc_regs[reg];
1540 break;
1541
1542 default:
1543 xive2_error(xive, "PC: invalid read @%"HWADDR_PRIx, offset);
1544 }
1545
1546 return val;
1547 }
1548
1549 static void pnv_xive2_pc_vst_set_data(PnvXive2 *xive, uint64_t vsd)
1550 {
1551 uint8_t type = GETFIELD(PC_VSD_TABLE_SELECT,
1552 xive->pc_regs[PC_VSD_TABLE_ADDR >> 3]);
1553 uint8_t blk = GETFIELD(PC_VSD_TABLE_ADDRESS,
1554 xive->pc_regs[PC_VSD_TABLE_ADDR >> 3]);
1555
1556 pnv_xive2_vst_set_data(xive, vsd, type, blk);
1557 }
1558
1559 static void pnv_xive2_ic_pc_write(void *opaque, hwaddr offset,
1560 uint64_t val, unsigned size)
1561 {
1562 PnvXive2 *xive = PNV_XIVE2(opaque);
1563 uint32_t reg = offset >> 3;
1564 uint8_t watch_engine;
1565
1566 switch (offset) {
1567
1568 /*
1569 * VSD table settings.
1570 * The Xive2Router model combines both VC and PC sub-engines. We
1571  * allow the tables to be configured through both, for the rare cases
1572 * where a table only really needs to be configured for one of
1573 * them (e.g. the NVG table for the presenter). It assumes that
1574 * firmware passes the same address to the VC and PC when tables
1575 * are defined for both, which seems acceptable.
1576 */
1577 case PC_VSD_TABLE_ADDR:
1578 break;
1579 case PC_VSD_TABLE_DATA:
1580 pnv_xive2_pc_vst_set_data(xive, val);
1581 break;
1582
1583 case PC_NXC_PROC_CONFIG:
1584 break;
1585
1586 /*
1587 * cache updates
1588 */
1589 case PC_NXC_WATCH0_SPEC:
1590 case PC_NXC_WATCH1_SPEC:
1591 case PC_NXC_WATCH2_SPEC:
1592 case PC_NXC_WATCH3_SPEC:
1593 val &= ~PC_NXC_WATCH_CONFLICT; /* HW will set this bit */
1594 break;
1595
1596 case PC_NXC_WATCH0_DATA1 ... PC_NXC_WATCH0_DATA3:
1597 case PC_NXC_WATCH1_DATA1 ... PC_NXC_WATCH1_DATA3:
1598 case PC_NXC_WATCH2_DATA1 ... PC_NXC_WATCH2_DATA3:
1599 case PC_NXC_WATCH3_DATA1 ... PC_NXC_WATCH3_DATA3:
1600 break;
1601 case PC_NXC_WATCH0_DATA0:
1602 case PC_NXC_WATCH1_DATA0:
1603 case PC_NXC_WATCH2_DATA0:
1604 case PC_NXC_WATCH3_DATA0:
1605 /* writing to DATA0 triggers the cache write */
1606 watch_engine = (offset - PC_NXC_WATCH0_DATA0) >> 6;
1607 xive->pc_regs[reg] = val;
1608 pnv_xive2_nxc_update(xive, watch_engine);
1609 break;
1610
1611 /* case PC_NXC_FLUSH_CTRL: */
1612 case PC_NXC_FLUSH_POLL:
1613 xive->pc_regs[PC_NXC_FLUSH_CTRL >> 3] |= PC_NXC_FLUSH_CTRL_POLL_VALID;
1614 break;
1615
1616 case PC_NXC_FLUSH_INJECT:
1617 pnv_xive2_inject_notify(xive, PNV_XIVE2_CACHE_NXC);
1618 break;
1619
1620 /*
1621 * Indirect invalidation
1622 */
1623 case PC_AT_KILL:
1624 case PC_AT_KILL_MASK:
1625 break;
1626
1627 default:
1628 xive2_error(xive, "PC: invalid write @%"HWADDR_PRIx, offset);
1629 return;
1630 }
1631
1632 xive->pc_regs[reg] = val;
1633 }
1634
1635 static const MemoryRegionOps pnv_xive2_ic_pc_ops = {
1636 .read = pnv_xive2_ic_pc_read,
1637 .write = pnv_xive2_ic_pc_write,
1638 .endianness = DEVICE_BIG_ENDIAN,
1639 .valid = {
1640 .min_access_size = 8,
1641 .max_access_size = 8,
1642 },
1643 .impl = {
1644 .min_access_size = 8,
1645 .max_access_size = 8,
1646 },
1647 };
1648
1649
1650 static uint64_t pnv_xive2_ic_tctxt_read(void *opaque, hwaddr offset,
1651 unsigned size)
1652 {
1653 PnvXive2 *xive = PNV_XIVE2(opaque);
1654 uint64_t val = -1;
1655 uint32_t reg = offset >> 3;
1656
1657 switch (offset) {
1658 /*
1659 * XIVE2 hardware thread enablement
1660 */
1661 case TCTXT_EN0:
1662 case TCTXT_EN1:
1663 val = xive->tctxt_regs[reg];
1664 break;
1665
1666 case TCTXT_EN0_SET:
1667 case TCTXT_EN0_RESET:
1668 val = xive->tctxt_regs[TCTXT_EN0 >> 3];
1669 break;
1670 case TCTXT_EN1_SET:
1671 case TCTXT_EN1_RESET:
1672 val = xive->tctxt_regs[TCTXT_EN1 >> 3];
1673 break;
1674 case TCTXT_CFG:
1675 val = xive->tctxt_regs[reg];
1676 break;
1677 default:
1678 xive2_error(xive, "TCTXT: invalid read @%"HWADDR_PRIx, offset);
1679 }
1680
1681 return val;
1682 }
1683
1684 static void pnv_xive2_ic_tctxt_write(void *opaque, hwaddr offset,
1685 uint64_t val, unsigned size)
1686 {
1687 PnvXive2 *xive = PNV_XIVE2(opaque);
1688 uint32_t reg = offset >> 3;
1689
1690 switch (offset) {
1691 /*
1692 * XIVE2 hardware thread enablement
1693 */
1694 case TCTXT_EN0: /* Physical Thread Enable */
1695 case TCTXT_EN1: /* Physical Thread Enable (fused core) */
1696 xive->tctxt_regs[reg] = val;
1697 break;
1698
1699 case TCTXT_EN0_SET:
1700 xive->tctxt_regs[TCTXT_EN0 >> 3] |= val;
1701 break;
1702 case TCTXT_EN1_SET:
1703 xive->tctxt_regs[TCTXT_EN1 >> 3] |= val;
1704 break;
1705 case TCTXT_EN0_RESET:
1706 xive->tctxt_regs[TCTXT_EN0 >> 3] &= ~val;
1707 break;
1708 case TCTXT_EN1_RESET:
1709 xive->tctxt_regs[TCTXT_EN1 >> 3] &= ~val;
1710 break;
1711 case TCTXT_CFG:
1712 xive->tctxt_regs[reg] = val;
1713 break;
1714 default:
1715 xive2_error(xive, "TCTXT: invalid write @%"HWADDR_PRIx, offset);
1716 return;
1717 }
1718 }
1719
1720 static const MemoryRegionOps pnv_xive2_ic_tctxt_ops = {
1721 .read = pnv_xive2_ic_tctxt_read,
1722 .write = pnv_xive2_ic_tctxt_write,
1723 .endianness = DEVICE_BIG_ENDIAN,
1724 .valid = {
1725 .min_access_size = 8,
1726 .max_access_size = 8,
1727 },
1728 .impl = {
1729 .min_access_size = 8,
1730 .max_access_size = 8,
1731 },
1732 };
1733
1734 /*
1735 * Redirect XSCOM to MMIO handlers
1736 */
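/*
 * Each IC sub-engine (CQ, VC, PC, TCTXT) is mirrored in a 0x100
 * register range of the XSCOM space; the low byte of the XSCOM
 * register index selects the register within the sub-engine.
 */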
1737 static uint64_t pnv_xive2_xscom_read(void *opaque, hwaddr offset,
1738 unsigned size)
1739 {
1740 PnvXive2 *xive = PNV_XIVE2(opaque);
1741 uint64_t val = -1;
1742 uint32_t xscom_reg = offset >> 3;
1743 uint32_t mmio_offset = (xscom_reg & 0xFF) << 3;
1744
1745 switch (xscom_reg) {
1746 case 0x000 ... 0x0FF:
1747 val = pnv_xive2_ic_cq_read(opaque, mmio_offset, size);
1748 break;
1749 case 0x100 ... 0x1FF:
1750 val = pnv_xive2_ic_vc_read(opaque, mmio_offset, size);
1751 break;
1752 case 0x200 ... 0x2FF:
1753 val = pnv_xive2_ic_pc_read(opaque, mmio_offset, size);
1754 break;
1755 case 0x300 ... 0x3FF:
1756 val = pnv_xive2_ic_tctxt_read(opaque, mmio_offset, size);
1757 break;
1758 default:
1759 xive2_error(xive, "XSCOM: invalid read @%"HWADDR_PRIx, offset);
1760 }
1761
1762 return val;
1763 }
1764
1765 static void pnv_xive2_xscom_write(void *opaque, hwaddr offset,
1766 uint64_t val, unsigned size)
1767 {
1768 PnvXive2 *xive = PNV_XIVE2(opaque);
1769 uint32_t xscom_reg = offset >> 3;
1770 uint32_t mmio_offset = (xscom_reg & 0xFF) << 3;
1771
1772 switch (xscom_reg) {
1773 case 0x000 ... 0x0FF:
1774 pnv_xive2_ic_cq_write(opaque, mmio_offset, val, size);
1775 break;
1776 case 0x100 ... 0x1FF:
1777 pnv_xive2_ic_vc_write(opaque, mmio_offset, val, size);
1778 break;
1779 case 0x200 ... 0x2FF:
1780 pnv_xive2_ic_pc_write(opaque, mmio_offset, val, size);
1781 break;
1782 case 0x300 ... 0x3FF:
1783 pnv_xive2_ic_tctxt_write(opaque, mmio_offset, val, size);
1784 break;
1785 default:
1786 xive2_error(xive, "XSCOM: invalid write @%"HWADDR_PRIx, offset);
1787 }
1788 }
1789
1790 static const MemoryRegionOps pnv_xive2_xscom_ops = {
1791 .read = pnv_xive2_xscom_read,
1792 .write = pnv_xive2_xscom_write,
1793 .endianness = DEVICE_BIG_ENDIAN,
1794 .valid = {
1795 .min_access_size = 8,
1796 .max_access_size = 8,
1797 },
1798 .impl = {
1799 .min_access_size = 8,
1800 .max_access_size = 8,
1801 },
1802 };
1803
1804 /*
1805  * Notify port page. The layout is compatible between 4K and 64K pages:
1806 *
1807 * Page 1 Notify page (writes only)
1808 * 0x000 - 0x7FF IPI interrupt (NPU)
1809 * 0x800 - 0xFFF HW interrupt triggers (PSI, PHB)
1810 */
1811
1812 static void pnv_xive2_ic_hw_trigger(PnvXive2 *xive, hwaddr addr,
1813 uint64_t val)
1814 {
1815 uint8_t blk;
1816 uint32_t idx;
1817
1818 if (val & XIVE_TRIGGER_END) {
1819 xive2_error(xive, "IC: END trigger at @0x%"HWADDR_PRIx" data 0x%"PRIx64,
1820 addr, val);
1821 return;
1822 }
1823
1824 /*
1825 * Forward the source event notification directly to the Router.
1826 * The source interrupt number should already be correctly encoded
1827 * with the chip block id by the sending device (PHB, PSI).
1828 */
1829 blk = XIVE_EAS_BLOCK(val);
1830 idx = XIVE_EAS_INDEX(val);
1831
1832 xive2_router_notify(XIVE_NOTIFIER(xive), XIVE_EAS(blk, idx),
1833 !!(val & XIVE_TRIGGER_PQ));
1834 }
1835
1836 static void pnv_xive2_ic_notify_write(void *opaque, hwaddr offset,
1837 uint64_t val, unsigned size)
1838 {
1839 PnvXive2 *xive = PNV_XIVE2(opaque);
1840
1841 /* VC: IPI triggers */
1842 switch (offset) {
1843 case 0x000 ... 0x7FF:
1844 /* TODO: check IPI notify sub-page routing */
1845 pnv_xive2_ic_hw_trigger(opaque, offset, val);
1846 break;
1847
1848 /* VC: HW triggers */
1849 case 0x800 ... 0xFFF:
1850 pnv_xive2_ic_hw_trigger(opaque, offset, val);
1851 break;
1852
1853 default:
1854 xive2_error(xive, "NOTIFY: invalid write @%"HWADDR_PRIx, offset);
1855 }
1856 }
1857
1858 static uint64_t pnv_xive2_ic_notify_read(void *opaque, hwaddr offset,
1859 unsigned size)
1860 {
1861 PnvXive2 *xive = PNV_XIVE2(opaque);
1862
1863 /* loads are invalid */
1864 xive2_error(xive, "NOTIFY: invalid read @%"HWADDR_PRIx, offset);
1865 return -1;
1866 }
1867
1868 static const MemoryRegionOps pnv_xive2_ic_notify_ops = {
1869 .read = pnv_xive2_ic_notify_read,
1870 .write = pnv_xive2_ic_notify_write,
1871 .endianness = DEVICE_BIG_ENDIAN,
1872 .valid = {
1873 .min_access_size = 8,
1874 .max_access_size = 8,
1875 },
1876 .impl = {
1877 .min_access_size = 8,
1878 .max_access_size = 8,
1879 },
1880 };
1881
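/*
 * LSI trigger page of the IC. Accesses are not handled and are
 * reported as guest errors.
 */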
static uint64_t pnv_xive2_ic_lsi_read(void *opaque, hwaddr offset,
                                      unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "LSI: invalid read @%"HWADDR_PRIx, offset);
    return -1;
}

static void pnv_xive2_ic_lsi_write(void *opaque, hwaddr offset,
                                   uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "LSI: invalid write @%"HWADDR_PRIx, offset);
}

static const MemoryRegionOps pnv_xive2_ic_lsi_ops = {
    .read = pnv_xive2_ic_lsi_read,
    .write = pnv_xive2_ic_lsi_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * Sync MMIO page (write only)
 */
#define PNV_XIVE2_SYNC_IPI              0x000
#define PNV_XIVE2_SYNC_HW               0x080
#define PNV_XIVE2_SYNC_NxC              0x100
#define PNV_XIVE2_SYNC_INT              0x180
#define PNV_XIVE2_SYNC_OS_ESC           0x200
#define PNV_XIVE2_SYNC_POOL_ESC         0x280
#define PNV_XIVE2_SYNC_HARD_ESC         0x300
#define PNV_XIVE2_SYNC_NXC_LD_LCL_NCO   0x800
#define PNV_XIVE2_SYNC_NXC_LD_LCL_CO    0x880
#define PNV_XIVE2_SYNC_NXC_ST_LCL_NCI   0x900
#define PNV_XIVE2_SYNC_NXC_ST_LCL_CI    0x980
#define PNV_XIVE2_SYNC_NXC_ST_RMT_NCI   0xA00
#define PNV_XIVE2_SYNC_NXC_ST_RMT_CI    0xA80

static uint64_t pnv_xive2_ic_sync_read(void *opaque, hwaddr offset,
                                       unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    /* loads are invalid */
    xive2_error(xive, "SYNC: invalid read @%"HWADDR_PRIx, offset);
    return -1;
}
/*
 * The sync MMIO space spans two pages. The lower page is used for
 * queue sync "poll" requests while the upper page is used for queue
 * sync "inject" requests. Inject requests require the HW to write
 * a byte of all 1's to a predetermined location in memory in order
 * to signal completion of the request. Both pages have the same
 * layout, so it is easiest to handle both with a single function.
 */
static void pnv_xive2_ic_sync_write(void *opaque, hwaddr offset,
                                    uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    int inject_type;
    hwaddr pg_offset_mask = (1ull << xive->ic_shift) - 1;

    /* adjust offset for inject page */
    hwaddr adj_offset = offset & pg_offset_mask;

    switch (adj_offset) {
    case PNV_XIVE2_SYNC_IPI:
        inject_type = PNV_XIVE2_QUEUE_IPI;
        break;
    case PNV_XIVE2_SYNC_HW:
        inject_type = PNV_XIVE2_QUEUE_HW;
        break;
    case PNV_XIVE2_SYNC_NxC:
        inject_type = PNV_XIVE2_QUEUE_NXC;
        break;
    case PNV_XIVE2_SYNC_INT:
        inject_type = PNV_XIVE2_QUEUE_INT;
        break;
    case PNV_XIVE2_SYNC_OS_ESC:
        inject_type = PNV_XIVE2_QUEUE_OS;
        break;
    case PNV_XIVE2_SYNC_POOL_ESC:
        inject_type = PNV_XIVE2_QUEUE_POOL;
        break;
    case PNV_XIVE2_SYNC_HARD_ESC:
        inject_type = PNV_XIVE2_QUEUE_HARD;
        break;
    case PNV_XIVE2_SYNC_NXC_LD_LCL_NCO:
        inject_type = PNV_XIVE2_QUEUE_NXC_LD_LCL_NCO;
        break;
    case PNV_XIVE2_SYNC_NXC_LD_LCL_CO:
        inject_type = PNV_XIVE2_QUEUE_NXC_LD_LCL_CO;
        break;
    case PNV_XIVE2_SYNC_NXC_ST_LCL_NCI:
        inject_type = PNV_XIVE2_QUEUE_NXC_ST_LCL_NCI;
        break;
    case PNV_XIVE2_SYNC_NXC_ST_LCL_CI:
        inject_type = PNV_XIVE2_QUEUE_NXC_ST_LCL_CI;
        break;
    case PNV_XIVE2_SYNC_NXC_ST_RMT_NCI:
        inject_type = PNV_XIVE2_QUEUE_NXC_ST_RMT_NCI;
        break;
    case PNV_XIVE2_SYNC_NXC_ST_RMT_CI:
        inject_type = PNV_XIVE2_QUEUE_NXC_ST_RMT_CI;
        break;
    default:
        xive2_error(xive, "SYNC: invalid write @%"HWADDR_PRIx, offset);
        return;
    }

    /* Write Queue Sync notification byte if writing to sync inject page */
    if ((offset & ~pg_offset_mask) != 0) {
        pnv_xive2_inject_notify(xive, inject_type);
    }
}

static const MemoryRegionOps pnv_xive2_ic_sync_ops = {
    .read = pnv_xive2_ic_sync_read,
    .write = pnv_xive2_ic_sync_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * When the TM direct pages of the IC controller are accessed, the
 * target HW thread is deduced from the page offset.
 */
static uint32_t pnv_xive2_ic_tm_get_pir(PnvXive2 *xive, hwaddr offset)
{
    /* On P10, the node ID shift in the PIR register is 8 bits */
    return xive->chip->chip_id << 8 | offset >> xive->ic_shift;
}

static uint32_t pnv_xive2_ic_tm_get_hw_page_offset(PnvXive2 *xive,
                                                   hwaddr offset)
{
    /*
     * Indirect TIMA accesses are similar to direct accesses for
     * privilege ring 0. So remove any traces of the hw thread ID from
     * the offset in the IC BAR as it could be interpreted as the ring
     * privilege when calling the underlying direct access functions.
     */
    return offset & ((1ull << xive->ic_shift) - 1);
}
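/* Return the thread interrupt context (TCTX) of the HW thread 'pir' */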
static XiveTCTX *pnv_xive2_get_indirect_tctx(PnvXive2 *xive, uint32_t pir)
{
    PnvChip *chip = xive->chip;
    PowerPCCPU *cpu = NULL;

    cpu = pnv_chip_find_cpu(chip, pir);
    if (!cpu) {
        xive2_error(xive, "IC: invalid PIR %x for indirect access", pir);
        return NULL;
    }

    if (!pnv_xive2_is_cpu_enabled(xive, cpu)) {
        xive2_error(xive, "IC: CPU %x is not enabled", pir);
    }

    return XIVE_TCTX(pnv_cpu_state(cpu)->intc);
}

static uint64_t pnv_xive2_ic_tm_indirect_read(void *opaque, hwaddr offset,
                                              unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    XivePresenter *xptr = XIVE_PRESENTER(xive);
    hwaddr hw_page_offset;
    uint32_t pir;
    XiveTCTX *tctx;
    uint64_t val = -1;

    pir = pnv_xive2_ic_tm_get_pir(xive, offset);
    hw_page_offset = pnv_xive2_ic_tm_get_hw_page_offset(xive, offset);
    tctx = pnv_xive2_get_indirect_tctx(xive, pir);
    if (tctx) {
        val = xive_tctx_tm_read(xptr, tctx, hw_page_offset, size);
    }

    return val;
}

static void pnv_xive2_ic_tm_indirect_write(void *opaque, hwaddr offset,
                                           uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    XivePresenter *xptr = XIVE_PRESENTER(xive);
    hwaddr hw_page_offset;
    uint32_t pir;
    XiveTCTX *tctx;

    pir = pnv_xive2_ic_tm_get_pir(xive, offset);
    hw_page_offset = pnv_xive2_ic_tm_get_hw_page_offset(xive, offset);
    tctx = pnv_xive2_get_indirect_tctx(xive, pir);
    if (tctx) {
        xive_tctx_tm_write(xptr, tctx, hw_page_offset, val, size);
    }
}

static const MemoryRegionOps pnv_xive2_ic_tm_indirect_ops = {
    .read = pnv_xive2_ic_tm_indirect_read,
    .write = pnv_xive2_ic_tm_indirect_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

/*
 * TIMA ops
 */
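/*
 * Direct TIMA accesses target the interrupt context of the CPU
 * performing the access (current_cpu).
 */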
static void pnv_xive2_tm_write(void *opaque, hwaddr offset,
                               uint64_t value, unsigned size)
{
    PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
    PnvXive2 *xive = pnv_xive2_tm_get_xive(cpu);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
    XivePresenter *xptr = XIVE_PRESENTER(xive);

    xive_tctx_tm_write(xptr, tctx, offset, value, size);
}

static uint64_t pnv_xive2_tm_read(void *opaque, hwaddr offset, unsigned size)
{
    PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
    PnvXive2 *xive = pnv_xive2_tm_get_xive(cpu);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
    XivePresenter *xptr = XIVE_PRESENTER(xive);

    return xive_tctx_tm_read(xptr, tctx, offset, size);
}

static const MemoryRegionOps pnv_xive2_tm_ops = {
    .read = pnv_xive2_tm_read,
    .write = pnv_xive2_tm_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};
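/*
 * The NVC and NVPG MMIO regions are not modeled. Accesses only report
 * guest errors.
 */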
static uint64_t pnv_xive2_nvc_read(void *opaque, hwaddr offset,
                                   unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "NVC: invalid read @%"HWADDR_PRIx, offset);
    return -1;
}

static void pnv_xive2_nvc_write(void *opaque, hwaddr offset,
                                uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "NVC: invalid write @%"HWADDR_PRIx, offset);
}

static const MemoryRegionOps pnv_xive2_nvc_ops = {
    .read = pnv_xive2_nvc_read,
    .write = pnv_xive2_nvc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static uint64_t pnv_xive2_nvpg_read(void *opaque, hwaddr offset,
                                    unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "NVPG: invalid read @%"HWADDR_PRIx, offset);
    return -1;
}

static void pnv_xive2_nvpg_write(void *opaque, hwaddr offset,
                                 uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "NVPG: invalid write @%"HWADDR_PRIx, offset);
}

static const MemoryRegionOps pnv_xive2_nvpg_ops = {
    .read = pnv_xive2_nvpg_read,
    .write = pnv_xive2_nvpg_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * POWER10 default capabilities: 0x2000120076f000FC
 */
#define PNV_XIVE2_CAPABILITIES  0x2000120076f000FC
/*
 * POWER10 default configuration: 0x0030000033000000
 *
 * The 8-bit thread id field was dropped on P10.
 */
#define PNV_XIVE2_CONFIGURATION 0x0030000033000000
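/*
 * Machine reset handler: restore the default capabilities and
 * configuration, reset the page shifts to 64K and unmap the source
 * ESB MMIO regions installed by firmware.
 */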
static void pnv_xive2_reset(void *dev)
{
    PnvXive2 *xive = PNV_XIVE2(dev);
    XiveSource *xsrc = &xive->ipi_source;
    Xive2EndSource *end_xsrc = &xive->end_source;

    xive->cq_regs[CQ_XIVE_CAP >> 3] = xive->capabilities;
    xive->cq_regs[CQ_XIVE_CFG >> 3] = xive->config;

    /* HW hardwires the #Topology of the chip in the block field */
    xive->cq_regs[CQ_XIVE_CFG >> 3] |=
        SETFIELD(CQ_XIVE_CFG_HYP_HARD_BLOCK_ID, 0ull, xive->chip->chip_id);

    /* VC and PC cache watch assign mechanism */
    xive->vc_regs[VC_ENDC_CFG >> 3] =
        SETFIELD(VC_ENDC_CFG_CACHE_WATCH_ASSIGN, 0ull, 0b0111);
    xive->pc_regs[PC_NXC_PROC_CONFIG >> 3] =
        SETFIELD(PC_NXC_PROC_CONFIG_WATCH_ASSIGN, 0ull, 0b0111);

    /* Set default page size to 64k */
    xive->ic_shift = xive->esb_shift = xive->end_shift = 16;
    xive->nvc_shift = xive->nvpg_shift = xive->tm_shift = 16;

    /* Clear source MMIOs */
    if (memory_region_is_mapped(&xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->esb_mmio, &xsrc->esb_mmio);
    }

    if (memory_region_is_mapped(&end_xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->end_mmio, &end_xsrc->esb_mmio);
    }
}

/*
 * Maximum number of IRQs and ENDs supported by HW. Will be tuned by
 * software.
 */
#define PNV_XIVE2_NR_IRQS (PNV10_XIVE2_ESB_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
#define PNV_XIVE2_NR_ENDS (PNV10_XIVE2_END_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
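/*
 * Realize handler: realize the embedded IPI and END sources and
 * create the MMIO regions of the controller.
 */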
static void pnv_xive2_realize(DeviceState *dev, Error **errp)
{
    PnvXive2 *xive = PNV_XIVE2(dev);
    PnvXive2Class *pxc = PNV_XIVE2_GET_CLASS(dev);
    XiveSource *xsrc = &xive->ipi_source;
    Xive2EndSource *end_xsrc = &xive->end_source;
    Error *local_err = NULL;
    int i;

    pxc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    assert(xive->chip);

    /*
     * The XiveSource and Xive2EndSource objects are realized with the
     * maximum allowed HW configuration. The ESB MMIO regions will be
     * resized dynamically when the controller is configured by the FW
     * to limit accesses to resources not provisioned.
     */
    object_property_set_int(OBJECT(xsrc), "flags", XIVE_SRC_STORE_EOI,
                            &error_fatal);
    object_property_set_int(OBJECT(xsrc), "nr-irqs", PNV_XIVE2_NR_IRQS,
                            &error_fatal);
    object_property_set_link(OBJECT(xsrc), "xive", OBJECT(xive),
                             &error_fatal);
    qdev_realize(DEVICE(xsrc), NULL, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    object_property_set_int(OBJECT(end_xsrc), "nr-ends", PNV_XIVE2_NR_ENDS,
                            &error_fatal);
    object_property_set_link(OBJECT(end_xsrc), "xive", OBJECT(xive),
                             &error_abort);
    qdev_realize(DEVICE(end_xsrc), NULL, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* XSCOM region, used for initial configuration of the BARs */
    memory_region_init_io(&xive->xscom_regs, OBJECT(dev),
                          &pnv_xive2_xscom_ops, xive, "xscom-xive",
                          PNV10_XSCOM_XIVE2_SIZE << 3);

    /* Interrupt controller MMIO regions */
    xive->ic_shift = 16;
    memory_region_init(&xive->ic_mmio, OBJECT(dev), "xive-ic",
                       PNV10_XIVE2_IC_SIZE);

    for (i = 0; i < ARRAY_SIZE(xive->ic_mmios); i++) {
        memory_region_init_io(&xive->ic_mmios[i], OBJECT(dev),
                              pnv_xive2_ic_regions[i].ops, xive,
                              pnv_xive2_ic_regions[i].name,
                              pnv_xive2_ic_regions[i].pgsize << xive->ic_shift);
    }

    /*
     * VC MMIO regions.
     */
    xive->esb_shift = 16;
    xive->end_shift = 16;
    memory_region_init(&xive->esb_mmio, OBJECT(xive), "xive-esb",
                       PNV10_XIVE2_ESB_SIZE);
    memory_region_init(&xive->end_mmio, OBJECT(xive), "xive-end",
                       PNV10_XIVE2_END_SIZE);

    /* Presenter Controller MMIO region (not modeled) */
    xive->nvc_shift = 16;
    xive->nvpg_shift = 16;
    memory_region_init_io(&xive->nvc_mmio, OBJECT(dev),
                          &pnv_xive2_nvc_ops, xive,
                          "xive-nvc", PNV10_XIVE2_NVC_SIZE);

    memory_region_init_io(&xive->nvpg_mmio, OBJECT(dev),
                          &pnv_xive2_nvpg_ops, xive,
                          "xive-nvpg", PNV10_XIVE2_NVPG_SIZE);

    /* Thread Interrupt Management Area (Direct) */
    xive->tm_shift = 16;
    memory_region_init_io(&xive->tm_mmio, OBJECT(dev), &pnv_xive2_tm_ops,
                          xive, "xive-tima", PNV10_XIVE2_TM_SIZE);

    qemu_register_reset(pnv_xive2_reset, dev);
}

static Property pnv_xive2_properties[] = {
    DEFINE_PROP_UINT64("ic-bar", PnvXive2, ic_base, 0),
    DEFINE_PROP_UINT64("esb-bar", PnvXive2, esb_base, 0),
    DEFINE_PROP_UINT64("end-bar", PnvXive2, end_base, 0),
    DEFINE_PROP_UINT64("nvc-bar", PnvXive2, nvc_base, 0),
    DEFINE_PROP_UINT64("nvpg-bar", PnvXive2, nvpg_base, 0),
    DEFINE_PROP_UINT64("tm-bar", PnvXive2, tm_base, 0),
    DEFINE_PROP_UINT64("capabilities", PnvXive2, capabilities,
                       PNV_XIVE2_CAPABILITIES),
    DEFINE_PROP_UINT64("config", PnvXive2, config,
                       PNV_XIVE2_CONFIGURATION),
    DEFINE_PROP_LINK("chip", PnvXive2, chip, TYPE_PNV_CHIP, PnvChip *),
    DEFINE_PROP_END_OF_LIST(),
};
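/* Pre-initialize the embedded IPI and END source child objects */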
static void pnv_xive2_instance_init(Object *obj)
{
    PnvXive2 *xive = PNV_XIVE2(obj);

    object_initialize_child(obj, "ipi_source", &xive->ipi_source,
                            TYPE_XIVE_SOURCE);
    object_initialize_child(obj, "end_source", &xive->end_source,
                            TYPE_XIVE2_END_SOURCE);
}
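/* Add the XIVE node for the XSCOM region to the device tree */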
static int pnv_xive2_dt_xscom(PnvXScomInterface *dev, void *fdt,
                              int xscom_offset)
{
    const char compat_p10[] = "ibm,power10-xive-x";
    char *name;
    int offset;
    uint32_t reg[] = {
        cpu_to_be32(PNV10_XSCOM_XIVE2_BASE),
        cpu_to_be32(PNV10_XSCOM_XIVE2_SIZE)
    };

    name = g_strdup_printf("xive@%x", PNV10_XSCOM_XIVE2_BASE);
    offset = fdt_add_subnode(fdt, xscom_offset, name);
    _FDT(offset);
    g_free(name);

    _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg))));
    _FDT(fdt_setprop(fdt, offset, "compatible", compat_p10,
                     sizeof(compat_p10)));
    return 0;
}

static void pnv_xive2_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass);
    Xive2RouterClass *xrc = XIVE2_ROUTER_CLASS(klass);
    XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);
    XivePresenterClass *xpc = XIVE_PRESENTER_CLASS(klass);
    PnvXive2Class *pxc = PNV_XIVE2_CLASS(klass);

    xdc->dt_xscom = pnv_xive2_dt_xscom;

    dc->desc = "PowerNV XIVE2 Interrupt Controller (POWER10)";
    device_class_set_parent_realize(dc, pnv_xive2_realize,
                                    &pxc->parent_realize);
    device_class_set_props(dc, pnv_xive2_properties);

    xrc->get_eas = pnv_xive2_get_eas;
    xrc->get_pq = pnv_xive2_get_pq;
    xrc->set_pq = pnv_xive2_set_pq;
    xrc->get_end = pnv_xive2_get_end;
    xrc->write_end = pnv_xive2_write_end;
    xrc->get_nvp = pnv_xive2_get_nvp;
    xrc->write_nvp = pnv_xive2_write_nvp;
    xrc->get_nvgc = pnv_xive2_get_nvgc;
    xrc->write_nvgc = pnv_xive2_write_nvgc;
    xrc->get_config = pnv_xive2_get_config;
    xrc->get_block_id = pnv_xive2_get_block_id;

    xnc->notify = pnv_xive2_notify;

    xpc->match_nvt = pnv_xive2_match_nvt;
    xpc->get_config = pnv_xive2_presenter_get_config;
}

static const TypeInfo pnv_xive2_info = {
    .name = TYPE_PNV_XIVE2,
    .parent = TYPE_XIVE2_ROUTER,
    .instance_init = pnv_xive2_instance_init,
    .instance_size = sizeof(PnvXive2),
    .class_init = pnv_xive2_class_init,
    .class_size = sizeof(PnvXive2Class),
    .interfaces = (InterfaceInfo[]) {
        { TYPE_PNV_XSCOM_INTERFACE },
        { }
    }
};

static void pnv_xive2_register_types(void)
{
    type_register_static(&pnv_xive2_info);
}

type_init(pnv_xive2_register_types)
/*
 * If the table is direct, we can compute the number of PQ entries
 * provisioned by FW.
 */
static uint32_t pnv_xive2_nr_esbs(PnvXive2 *xive)
{
    uint8_t blk = pnv_xive2_block_id(xive);
    uint64_t vsd = xive->vsds[VST_ESB][blk];
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);

    return VSD_INDIRECT & vsd ? 0 : vst_tsize * SBE_PER_BYTE;
}

/*
 * Compute the number of entries per indirect subpage.
 */
static uint64_t pnv_xive2_vst_per_subpage(PnvXive2 *xive, uint32_t type)
{
    uint8_t blk = pnv_xive2_block_id(xive);
    uint64_t vsd = xive->vsds[type][blk];
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t page_shift;

    /* For direct tables, fake a valid value */
    if (!(VSD_INDIRECT & vsd)) {
        return 1;
    }

    /* Get the page size of the indirect table. */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    ldq_be_dma(&address_space_memory, vsd_addr, &vsd, MEMTXATTRS_UNSPECIFIED);

    if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE2_DEBUG
        xive2_error(xive, "VST: invalid %s entry!?", info->name);
#endif
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive2_vst_page_size_allowed(page_shift)) {
        xive2_error(xive, "VST: invalid %s page shift %d", info->name,
                    page_shift);
        return 0;
    }

    return (1ull << page_shift) / info->size;
}
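/*
 * Debug helper: dump the state of the controller (sources, EAT, END
 * tables, NVPT, NVGT, NVCT).
 */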
void pnv_xive2_pic_print_info(PnvXive2 *xive, GString *buf)
{
    Xive2Router *xrtr = XIVE2_ROUTER(xive);
    uint8_t blk = pnv_xive2_block_id(xive);
    uint8_t chip_id = xive->chip->chip_id;
    uint32_t srcno0 = XIVE_EAS(blk, 0);
    uint32_t nr_esbs = pnv_xive2_nr_esbs(xive);
    Xive2Eas eas;
    Xive2End end;
    Xive2Nvp nvp;
    Xive2Nvgc nvgc;
    int i;
    uint64_t entries_per_subpage;

    g_string_append_printf(buf, "XIVE[%x] Source %08x .. %08x\n",
                           blk, srcno0, srcno0 + nr_esbs - 1);
    xive_source_pic_print_info(&xive->ipi_source, srcno0, buf);

    g_string_append_printf(buf, "XIVE[%x] EAT %08x .. %08x\n",
                           blk, srcno0, srcno0 + nr_esbs - 1);
    for (i = 0; i < nr_esbs; i++) {
        if (xive2_router_get_eas(xrtr, blk, i, &eas)) {
            break;
        }
        if (!xive2_eas_is_masked(&eas)) {
            xive2_eas_pic_print_info(&eas, i, buf);
        }
    }

    g_string_append_printf(buf, "XIVE[%x] #%d END Escalation EAT\n",
                           chip_id, blk);
    i = 0;
    while (!xive2_router_get_end(xrtr, blk, i, &end)) {
        xive2_end_eas_pic_print_info(&end, i++, buf);
    }

    g_string_append_printf(buf, "XIVE[%x] #%d ENDT\n", chip_id, blk);
    i = 0;
    while (!xive2_router_get_end(xrtr, blk, i, &end)) {
        xive2_end_pic_print_info(&end, i++, buf);
    }

    g_string_append_printf(buf, "XIVE[%x] #%d NVPT %08x .. %08x\n",
                           chip_id, blk, 0, XIVE2_NVP_COUNT - 1);
    entries_per_subpage = pnv_xive2_vst_per_subpage(xive, VST_NVP);
    for (i = 0; i < XIVE2_NVP_COUNT; i += entries_per_subpage) {
        while (!xive2_router_get_nvp(xrtr, blk, i, &nvp)) {
            xive2_nvp_pic_print_info(&nvp, i++, buf);
        }
    }

    g_string_append_printf(buf, "XIVE[%x] #%d NVGT %08x .. %08x\n",
                           chip_id, blk, 0, XIVE2_NVP_COUNT - 1);
    entries_per_subpage = pnv_xive2_vst_per_subpage(xive, VST_NVG);
    for (i = 0; i < XIVE2_NVP_COUNT; i += entries_per_subpage) {
        while (!xive2_router_get_nvgc(xrtr, false, blk, i, &nvgc)) {
            xive2_nvgc_pic_print_info(&nvgc, i++, buf);
        }
    }

    g_string_append_printf(buf, "XIVE[%x] #%d NVCT %08x .. %08x\n",
                           chip_id, blk, 0, XIVE2_NVP_COUNT - 1);
    entries_per_subpage = pnv_xive2_vst_per_subpage(xive, VST_NVC);
    for (i = 0; i < XIVE2_NVP_COUNT; i += entries_per_subpage) {
        while (!xive2_router_get_nvgc(xrtr, true, blk, i, &nvgc)) {
            xive2_nvgc_pic_print_info(&nvgc, i++, buf);
        }
    }
}