/*
 * QEMU PowerPC XIVE interrupt controller model
 *
 * Copyright (c) 2017-2019, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/dma.h"
#include "sysemu/reset.h"
#include "hw/ppc/fdt.h"
#include "hw/ppc/pnv.h"
#include "hw/ppc/pnv_chip.h"
#include "hw/ppc/pnv_core.h"
#include "hw/ppc/pnv_xscom.h"
#include "hw/ppc/pnv_xive.h"
#include "hw/ppc/xive_regs.h"
#include "hw/qdev-properties.h"
#include "hw/ppc/ppc.h"
#include "trace.h"

#include <libfdt.h>

#include "pnv_xive_regs.h"

#undef XIVE_DEBUG

/*
 * Virtual structures table (VST)
 */
#define SBE_PER_BYTE   4
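
/*
 * Illustrative note (model commentary): an SBE entry is the 2-bit PQ
 * state of one interrupt source, hence SBE_PER_BYTE = 4 sources per
 * byte of backing store.
 */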

typedef struct XiveVstInfo {
    const char *name;
    uint32_t    size;
    uint32_t    max_blocks;
} XiveVstInfo;

static const XiveVstInfo vst_infos[] = {
    [VST_TSEL_IVT]  = { "EAT",  sizeof(XiveEAS), 16 },
    [VST_TSEL_SBE]  = { "SBE",  1,               16 },
    [VST_TSEL_EQDT] = { "ENDT", sizeof(XiveEND), 16 },
    [VST_TSEL_VPDT] = { "VPDT", sizeof(XiveNVT), 32 },

    /*
     * Interrupt fifo backing store table (not modeled):
     *
     * 0 - IPI,
     * 1 - HWD,
     * 2 - First escalate,
     * 3 - Second escalate,
     * 4 - Redistribution,
     * 5 - IPI cascaded queue ?
     */
    [VST_TSEL_IRQ]  = { "IRQ",  1,               6  },
};

#define xive_error(xive, fmt, ...)                                      \
    qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n",              \
                  (xive)->chip->chip_id, ## __VA_ARGS__);

/*
 * When PC_TCTXT_CHIPID_OVERRIDE is configured, the PC_TCTXT_CHIPID
 * field overrides the hardwired chip ID in the PowerBus operations
 * and for CAM compares
 */
static uint8_t pnv_xive_block_id(PnvXive *xive)
{
    uint8_t blk = xive->chip->chip_id;
    uint64_t cfg_val = xive->regs[PC_TCTXT_CFG >> 3];

    if (cfg_val & PC_TCTXT_CHIPID_OVERRIDE) {
        blk = GETFIELD(PC_TCTXT_CHIPID, cfg_val);
    }

    return blk;
}

/*
 * VST accessors for SBE, EAT, ENDT, NVT
 *
 * Indirect VST tables are arrays of VSDs pointing to a page (of the
 * same size). Each page is a direct VST table.
 */

#define XIVE_VSD_SIZE 8

/* Indirect page size can be 4K, 64K, 2M, 16M. */
static uint64_t pnv_xive_vst_page_size_allowed(uint32_t page_shift)
{
    return page_shift == 12 || page_shift == 16 ||
        page_shift == 21 || page_shift == 24;
}

static uint64_t pnv_xive_vst_addr_direct(PnvXive *xive, uint32_t type,
                                         uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);
    uint32_t idx_max;

    idx_max = vst_tsize / info->size - 1;
    if (idx > idx_max) {
#ifdef XIVE_DEBUG
        xive_error(xive, "VST: %s entry %x out of range [ 0 .. %x ] !?",
                   info->name, idx, idx_max);
#endif
        return 0;
    }

    return vst_addr + idx * info->size;
}
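
/*
 * Worked example (illustrative values): a direct ENDT with
 * VSD_TSIZE = 4 spans 1ull << (4 + 12) = 64K of RAM. Assuming the
 * 8-word (32-byte) XiveEND layout, idx_max = 65536 / 32 - 1 = 2047
 * and entry 100 lives at vst_addr + 100 * 32.
 */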

static uint64_t pnv_xive_vst_addr_indirect(PnvXive *xive, uint32_t type,
                                           uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t vsd_idx;
    uint32_t page_shift;
    uint32_t vst_per_page;

    /* Get the page size of the indirect table. */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    if (ldq_be_dma(&address_space_memory, vsd_addr, &vsd,
                   MEMTXATTRS_UNSPECIFIED)) {
        xive_error(xive, "VST: failed to access %s entry %x @0x%" PRIx64,
                   info->name, idx, vsd_addr);
        return 0;
    }

    if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE_DEBUG
        xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
#endif
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive_vst_page_size_allowed(page_shift)) {
        xive_error(xive, "VST: invalid %s page shift %d", info->name,
                   page_shift);
        return 0;
    }

    vst_per_page = (1ull << page_shift) / info->size;
    vsd_idx = idx / vst_per_page;

    /* Load the VSD we are looking for, if not already done */
    if (vsd_idx) {
        vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE;
        if (ldq_be_dma(&address_space_memory, vsd_addr, &vsd,
                       MEMTXATTRS_UNSPECIFIED)) {
            xive_error(xive, "VST: failed to access %s entry %x @0x%"
                       PRIx64, info->name, vsd_idx, vsd_addr);
            return 0;
        }

        if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE_DEBUG
            xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
#endif
            return 0;
        }

        /*
         * Check that the pages have a consistent size across the
         * indirect table
         */
        if (page_shift != GETFIELD(VSD_TSIZE, vsd) + 12) {
            xive_error(xive, "VST: %s entry %x indirect page size differs !?",
                       info->name, idx);
            return 0;
        }
    }

    return pnv_xive_vst_addr_direct(xive, type, vsd, (idx % vst_per_page));
}
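
/*
 * Worked example (illustrative values): an indirect ENDT whose VSDs
 * point to 64K pages holds vst_per_page = 65536 / 32 = 2048 ENDs per
 * page. Resolving idx = 5000 loads the VSD at vsd_addr +
 * (5000 / 2048) * XIVE_VSD_SIZE and then looks up entry
 * 5000 % 2048 = 904 in the direct page it describes.
 */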

/*
 * This is a simplified model of operation forwarding on a remote IC.
 *
 * A PC MMIO address is built to identify the NVT structure. The load
 * on the remote IC will return the address of the structure in RAM,
 * which will then be used by pnv_xive_vst_write/read to perform the
 * RAM operation.
 */
static uint64_t pnv_xive_vst_addr_remote(PnvXive *xive, uint32_t type,
                                         uint64_t vsd, uint8_t blk,
                                         uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t remote_addr = vsd & VSD_ADDRESS_MASK;
    uint64_t vst_addr;
    MemTxResult result;

    if (type != VST_TSEL_VPDT) {
        xive_error(xive, "VST: invalid access on remote VST %s %x/%x !?",
                   info->name, blk, idx);
        return 0;
    }

    remote_addr |= ((uint64_t)idx) << xive->pc_shift;

    vst_addr = address_space_ldq_be(&address_space_memory, remote_addr,
                                    MEMTXATTRS_UNSPECIFIED, &result);
    if (result != MEMTX_OK) {
        xive_error(xive, "VST: read failed at @0x%" HWADDR_PRIx
                   " for NVT %x/%x\n", remote_addr, blk, idx);
        return 0;
    }

    return vst_addr;
}
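
/*
 * Example (illustrative, assuming a 4K PC page shift): the remote
 * address for NVT idx 0x10 is (vsd & VSD_ADDRESS_MASK) | (0x10 << 12),
 * i.e. one PC MMIO page per NVT. The load returns the RAM address of
 * the NVT structure on the remote chip.
 */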

static uint64_t pnv_xive_vst_addr(PnvXive *xive, uint32_t type, uint8_t blk,
                                  uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd;

    if (blk >= info->max_blocks) {
        xive_error(xive, "VST: invalid block id %d for VST %s %d !?",
                   blk, info->name, idx);
        return 0;
    }

    vsd = xive->vsds[type][blk];

    /* Remote VST access */
    if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) {
        return pnv_xive_vst_addr_remote(xive, type, vsd, blk, idx);
    }

    if (VSD_INDIRECT & vsd) {
        return pnv_xive_vst_addr_indirect(xive, type, vsd, idx);
    }

    return pnv_xive_vst_addr_direct(xive, type, vsd, idx);
}

static int pnv_xive_vst_read(PnvXive *xive, uint32_t type, uint8_t blk,
                             uint32_t idx, void *data)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx);
    MemTxResult result;

    if (!addr) {
        return -1;
    }

    result = address_space_read(&address_space_memory, addr,
                                MEMTXATTRS_UNSPECIFIED, data,
                                info->size);
    if (result != MEMTX_OK) {
        xive_error(xive, "VST: read failed at @0x%" HWADDR_PRIx
                   " for VST %s %x/%x\n", addr, info->name, blk, idx);
        return -1;
    }
    return 0;
}

#define XIVE_VST_WORD_ALL -1

static int pnv_xive_vst_write(PnvXive *xive, uint32_t type, uint8_t blk,
                              uint32_t idx, void *data, uint32_t word_number)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx);
    MemTxResult result;

    if (!addr) {
        return -1;
    }

    if (word_number == XIVE_VST_WORD_ALL) {
        result = address_space_write(&address_space_memory, addr,
                                     MEMTXATTRS_UNSPECIFIED, data,
                                     info->size);
    } else {
        result = address_space_write(&address_space_memory,
                                     addr + word_number * 4,
                                     MEMTXATTRS_UNSPECIFIED,
                                     data + word_number * 4, 4);
    }

    if (result != MEMTX_OK) {
        xive_error(xive, "VST: write failed at @0x%" HWADDR_PRIx
                   " for VST %s %x/%x\n", addr, info->name, blk, idx);
        return -1;
    }
    return 0;
}
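
/*
 * Example (illustrative): a word-granular update with word_number = 2
 * stores the 4 bytes at addr + 8, taken from the same offset in the
 * caller's buffer. XIVE_VST_WORD_ALL writes the full structure, as the
 * cache watch helpers below do.
 */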

static int pnv_xive_get_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveEND *end)
{
    PnvXive *xive = PNV_XIVE(xrtr);

    if (pnv_xive_block_id(xive) != blk) {
        xive_error(xive, "VST: END %x/%x is remote !?", blk, idx);
        return -1;
    }

    return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end);
}

static int pnv_xive_write_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                              XiveEND *end, uint8_t word_number)
{
    PnvXive *xive = PNV_XIVE(xrtr);

    if (pnv_xive_block_id(xive) != blk) {
        xive_error(xive, "VST: END %x/%x is remote !?", blk, idx);
        return -1;
    }

    return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end,
                              word_number);
}

static int pnv_xive_end_update(PnvXive *xive)
{
    uint8_t  blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
                           xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
                           xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    int i;
    uint64_t eqc_watch[4];

    for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) {
        eqc_watch[i] = cpu_to_be64(xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i]);
    }

    return pnv_xive_vst_write(xive, VST_TSEL_EQDT, blk, idx, eqc_watch,
                              XIVE_VST_WORD_ALL);
}

static void pnv_xive_end_cache_load(PnvXive *xive)
{
    uint8_t  blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
                           xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
                           xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint64_t eqc_watch[4] = { 0 };
    int i;

    if (pnv_xive_vst_read(xive, VST_TSEL_EQDT, blk, idx, eqc_watch)) {
        xive_error(xive, "VST: no END entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) {
        xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(eqc_watch[i]);
    }
}

static int pnv_xive_get_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveNVT *nvt)
{
    return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt);
}

static int pnv_xive_write_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                              XiveNVT *nvt, uint8_t word_number)
{
    return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt,
                              word_number);
}

static int pnv_xive_nvt_update(PnvXive *xive)
{
    uint8_t  blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
                           xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
                           xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    int i;
    uint64_t vpc_watch[8];

    for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) {
        vpc_watch[i] = cpu_to_be64(xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i]);
    }

    return pnv_xive_vst_write(xive, VST_TSEL_VPDT, blk, idx, vpc_watch,
                              XIVE_VST_WORD_ALL);
}

static void pnv_xive_nvt_cache_load(PnvXive *xive)
{
    uint8_t  blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
                           xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
                           xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint64_t vpc_watch[8] = { 0 };
    int i;

    if (pnv_xive_vst_read(xive, VST_TSEL_VPDT, blk, idx, vpc_watch)) {
        xive_error(xive, "VST: no NVT entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) {
        xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(vpc_watch[i]);
    }
}

static int pnv_xive_get_eas(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveEAS *eas)
{
    PnvXive *xive = PNV_XIVE(xrtr);

    /*
     * EAT lookups should be local to the IC
     */
    if (pnv_xive_block_id(xive) != blk) {
        xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    return pnv_xive_vst_read(xive, VST_TSEL_IVT, blk, idx, eas);
}

static int pnv_xive_get_pq(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                           uint8_t *pq)
{
    PnvXive *xive = PNV_XIVE(xrtr);

    if (pnv_xive_block_id(xive) != blk) {
        xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    *pq = xive_source_esb_get(&xive->ipi_source, idx);
    return 0;
}

static int pnv_xive_set_pq(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                           uint8_t *pq)
{
    PnvXive *xive = PNV_XIVE(xrtr);

    if (pnv_xive_block_id(xive) != blk) {
        xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    *pq = xive_source_esb_set(&xive->ipi_source, idx, *pq);
    return 0;
}

/*
 * One bit per thread id. The first register PC_THREAD_EN_REG0 covers
 * cores 0-15 (normal) of the chip or 0-7 (fused). The second
 * register covers cores 16-23 (normal) or 8-11 (fused).
 */
static bool pnv_xive_is_cpu_enabled(PnvXive *xive, PowerPCCPU *cpu)
{
    int pir = ppc_cpu_pir(cpu);
    uint32_t fc = PNV9_PIR2FUSEDCORE(pir);
    uint64_t reg = fc < 8 ? PC_THREAD_EN_REG0 : PC_THREAD_EN_REG1;
    uint32_t bit = pir & 0x3f;

    return xive->regs[reg >> 3] & PPC_BIT(bit);
}
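
/*
 * Example (illustrative, assuming PNV9_PIR2FUSEDCORE() extracts the
 * fused core number from the PIR bits above the 3-bit thread id): a
 * thread with PIR 0x45 maps to fused core 8, so its enablement bit is
 * PPC_BIT(0x45 & 0x3f) = PPC_BIT(5) of PC_THREAD_EN_REG1.
 */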

static int pnv_xive_match_nvt(XivePresenter *xptr, uint8_t format,
                              uint8_t nvt_blk, uint32_t nvt_idx,
                              bool cam_ignore, uint8_t priority,
                              uint32_t logic_serv, XiveTCTXMatch *match)
{
    PnvXive *xive = PNV_XIVE(xptr);
    PnvChip *chip = xive->chip;
    int count = 0;
    int i, j;

    for (i = 0; i < chip->nr_cores; i++) {
        PnvCore *pc = chip->cores[i];
        CPUCore *cc = CPU_CORE(pc);

        for (j = 0; j < cc->nr_threads; j++) {
            PowerPCCPU *cpu = pc->threads[j];
            XiveTCTX *tctx;
            int ring;

            if (!pnv_xive_is_cpu_enabled(xive, cpu)) {
                continue;
            }

            tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

            /*
             * Check the thread context CAM lines and record matches.
             */
            ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk,
                                             nvt_idx, cam_ignore, logic_serv);
            /*
             * Save the context and follow on to catch duplicates,
             * which we don't support yet.
             */
            if (ring != -1) {
                if (match->tctx) {
                    qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a "
                                  "thread context NVT %x/%x\n",
                                  nvt_blk, nvt_idx);
                    return -1;
                }

                match->ring = ring;
                match->tctx = tctx;
                count++;
            }
        }
    }

    return count;
}

static uint32_t pnv_xive_presenter_get_config(XivePresenter *xptr)
{
    uint32_t cfg = 0;

    /* TIMA GEN1 is all P9 knows */
    cfg |= XIVE_PRESENTER_GEN1_TIMA_OS;

    return cfg;
}

static uint8_t pnv_xive_get_block_id(XiveRouter *xrtr)
{
    return pnv_xive_block_id(PNV_XIVE(xrtr));
}

/*
 * The TIMA MMIO space is shared among the chips and to identify the
 * chip from which the access is being done, we extract the chip id
 * from the PIR.
 */
static PnvXive *pnv_xive_tm_get_xive(PowerPCCPU *cpu)
{
    int pir = ppc_cpu_pir(cpu);
    XivePresenter *xptr = XIVE_TCTX(pnv_cpu_state(cpu)->intc)->xptr;
    PnvXive *xive = PNV_XIVE(xptr);

    if (!pnv_xive_is_cpu_enabled(xive, cpu)) {
        xive_error(xive, "IC: CPU %x is not enabled", pir);
    }
    return xive;
}

/*
 * The internal sources (IPIs) of the interrupt controller have no
 * knowledge of the XIVE chip on which they reside. Encode the block
 * id in the source interrupt number before forwarding the source
 * event notification to the Router. This is required on a multichip
 * system.
 */
static void pnv_xive_notify(XiveNotifier *xn, uint32_t srcno, bool pq_checked)
{
    PnvXive *xive = PNV_XIVE(xn);
    uint8_t blk = pnv_xive_block_id(xive);

    xive_router_notify(xn, XIVE_EAS(blk, srcno), pq_checked);
}
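
/*
 * Example (illustrative): on chip 1 (block 1), IPI source number 0x20
 * is presented to the Router as XIVE_EAS(1, 0x20), with the block id
 * folded into the high bits of the global source number so that the
 * EAS lookup selects the right EAT block.
 */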

/*
 * XIVE helpers
 */

static uint64_t pnv_xive_vc_size(PnvXive *xive)
{
    return (~xive->regs[CQ_VC_BARM >> 3] + 1) & CQ_VC_BARM_MASK;
}

static uint64_t pnv_xive_edt_shift(PnvXive *xive)
{
    return ctz64(pnv_xive_vc_size(xive) / XIVE_TABLE_EDT_MAX);
}

static uint64_t pnv_xive_pc_size(PnvXive *xive)
{
    return (~xive->regs[CQ_PC_BARM >> 3] + 1) & CQ_PC_BARM_MASK;
}

static uint32_t pnv_xive_nr_ipis(PnvXive *xive, uint8_t blk)
{
    uint64_t vsd = xive->vsds[VST_TSEL_SBE][blk];
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);

    return VSD_INDIRECT & vsd ? 0 : vst_tsize * SBE_PER_BYTE;
}
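
/*
 * Example (illustrative): a direct SBE table of a single 64K page
 * provisions 0x10000 * SBE_PER_BYTE = 0x40000 IPIs. Indirect SBE
 * tables are not used to size the IPI range, hence the 0.
 */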

/*
 * Compute the number of entries per indirect subpage.
 */
static uint64_t pnv_xive_vst_per_subpage(PnvXive *xive, uint32_t type)
{
    uint8_t blk = pnv_xive_block_id(xive);
    uint64_t vsd = xive->vsds[type][blk];
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t page_shift;

    /* For direct tables, fake a valid value */
    if (!(VSD_INDIRECT & vsd)) {
        return 1;
    }

    /* Get the page size of the indirect table. */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    if (ldq_be_dma(&address_space_memory, vsd_addr, &vsd,
                   MEMTXATTRS_UNSPECIFIED)) {
        xive_error(xive, "VST: failed to access %s entry @0x%" PRIx64,
                   info->name, vsd_addr);
        return 0;
    }

    if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE_DEBUG
        /* First VSD of the table: no entry index to report here */
        xive_error(xive, "VST: invalid %s entry !?", info->name);
#endif
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive_vst_page_size_allowed(page_shift)) {
        xive_error(xive, "VST: invalid %s page shift %d", info->name,
                   page_shift);
        return 0;
    }

    return (1ull << page_shift) / info->size;
}

/*
 * EDT Table
 *
 * The Virtualization Controller MMIO region containing the IPI ESB
 * pages and END ESB pages is sub-divided into "sets" which map
 * portions of the VC region to the different ESB pages. It is
 * configured at runtime through the EDT "Domain Table" to let the
 * firmware decide how to split the VC address space between IPI ESB
 * pages and END ESB pages.
 */

/*
 * Computes the overall size of the IPI or the END ESB pages
 */
static uint64_t pnv_xive_edt_size(PnvXive *xive, uint64_t type)
{
    uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive);
    uint64_t size = 0;
    int i;

    for (i = 0; i < XIVE_TABLE_EDT_MAX; i++) {
        uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]);

        if (edt_type == type) {
            size += edt_size;
        }
    }

    return size;
}

/*
 * Maps an offset of the VC region in the IPI or END region using the
 * layout defined by the EDT "Domain Table"
 */
static uint64_t pnv_xive_edt_offset(PnvXive *xive, uint64_t vc_offset,
                                    uint64_t type)
{
    int i;
    uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive);
    uint64_t edt_offset = vc_offset;

    for (i = 0; i < XIVE_TABLE_EDT_MAX && (i * edt_size) < vc_offset; i++) {
        uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]);

        if (edt_type != type) {
            edt_offset -= edt_size;
        }
    }

    return edt_offset;
}
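
/*
 * Worked example (illustrative): if the first two EDT sets are typed
 * IPI and the third is typed EQ, a VC offset landing in the third set
 * has the two preceding IPI sets subtracted and thus maps to offset 0
 * of the END ESB region.
 */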

static void pnv_xive_edt_resize(PnvXive *xive)
{
    uint64_t ipi_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_IPI);
    uint64_t end_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_EQ);

    memory_region_set_size(&xive->ipi_edt_mmio, ipi_edt_size);
    memory_region_add_subregion(&xive->ipi_mmio, 0, &xive->ipi_edt_mmio);

    memory_region_set_size(&xive->end_edt_mmio, end_edt_size);
    memory_region_add_subregion(&xive->end_mmio, 0, &xive->end_edt_mmio);
}

/*
 * XIVE Table configuration. Only EDT is supported.
 */
static int pnv_xive_table_set_data(PnvXive *xive, uint64_t val)
{
    uint64_t tsel = xive->regs[CQ_TAR >> 3] & CQ_TAR_TSEL;
    uint8_t tsel_index = GETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3]);
    uint64_t *xive_table;
    uint8_t max_index;

    switch (tsel) {
    case CQ_TAR_TSEL_BLK:
        max_index = ARRAY_SIZE(xive->blk);
        xive_table = xive->blk;
        break;
    case CQ_TAR_TSEL_MIG:
        max_index = ARRAY_SIZE(xive->mig);
        xive_table = xive->mig;
        break;
    case CQ_TAR_TSEL_EDT:
        max_index = ARRAY_SIZE(xive->edt);
        xive_table = xive->edt;
        break;
    case CQ_TAR_TSEL_VDT:
        max_index = ARRAY_SIZE(xive->vdt);
        xive_table = xive->vdt;
        break;
    default:
        xive_error(xive, "IC: invalid table %d", (int) tsel);
        return -1;
    }

    if (tsel_index >= max_index) {
        xive_error(xive, "IC: invalid index %d", (int) tsel_index);
        return -1;
    }

    xive_table[tsel_index] = val;

    if (xive->regs[CQ_TAR >> 3] & CQ_TAR_TBL_AUTOINC) {
        xive->regs[CQ_TAR >> 3] =
            SETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3], ++tsel_index);
    }

    /*
     * EDT configuration is complete. Resize the MMIO windows exposing
     * the IPI and the END ESBs in the VC region.
     */
    if (tsel == CQ_TAR_TSEL_EDT && tsel_index == ARRAY_SIZE(xive->edt)) {
        pnv_xive_edt_resize(xive);
    }

    return 0;
}
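
/*
 * Usage sketch (illustrative): firmware typically programs CQ_TAR with
 * CQ_TAR_TBL_AUTOINC | CQ_TAR_TSEL_EDT and index 0, then issues one
 * CQ_TDR write per EDT set; the index auto-increments and the IPI/END
 * MMIO windows are resized after the last entry is written.
 */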

/*
 * Virtual Structure Tables (VST) configuration
 */
static void pnv_xive_vst_set_exclusive(PnvXive *xive, uint8_t type,
                                       uint8_t blk, uint64_t vsd)
{
    XiveENDSource *end_xsrc = &xive->end_source;
    XiveSource *xsrc = &xive->ipi_source;
    const XiveVstInfo *info = &vst_infos[type];
    uint32_t page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
    uint64_t vst_tsize = 1ull << page_shift;
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    /* Basic checks */

    if (VSD_INDIRECT & vsd) {
        if (!(xive->regs[VC_GLOBAL_CONFIG >> 3] & VC_GCONF_INDIRECT)) {
            xive_error(xive, "VST: %s indirect tables are not enabled",
                       info->name);
            return;
        }

        if (!pnv_xive_vst_page_size_allowed(page_shift)) {
            xive_error(xive, "VST: invalid %s page shift %d", info->name,
                       page_shift);
            return;
        }
    }

    if (!QEMU_IS_ALIGNED(vst_addr, 1ull << page_shift)) {
        xive_error(xive, "VST: %s table address 0x%"PRIx64" is not aligned with"
                   " page shift %d", info->name, vst_addr, page_shift);
        return;
    }

    /* Record the table configuration (in SRAM on HW) */
    xive->vsds[type][blk] = vsd;

    /* Now tune the models with the configuration provided by the FW */

    switch (type) {
    case VST_TSEL_IVT:  /* Nothing to be done */
        break;

    case VST_TSEL_EQDT:
        /*
         * Backing store pages for the END.
         *
         * If the table is direct, we can compute the number of PQ
         * entries provisioned by FW (such as skiboot) and resize the
         * END ESB window accordingly.
         */
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&end_xsrc->esb_mmio, (vst_tsize / info->size)
                                   * (1ull << xsrc->esb_shift));
        }
        memory_region_add_subregion(&xive->end_edt_mmio, 0,
                                    &end_xsrc->esb_mmio);
        break;

    case VST_TSEL_SBE:
        /*
         * Backing store pages for the source PQ bits. The model does
         * not use these PQ bits backed in RAM because the XiveSource
         * model has its own.
         *
         * If the table is direct, we can compute the number of PQ
         * entries provisioned by FW (such as skiboot) and resize the
         * ESB window accordingly.
         */
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&xsrc->esb_mmio, vst_tsize * SBE_PER_BYTE
                                   * (1ull << xsrc->esb_shift));
        }
        memory_region_add_subregion(&xive->ipi_edt_mmio, 0, &xsrc->esb_mmio);
        break;

    case VST_TSEL_VPDT: /* Not modeled */
    case VST_TSEL_IRQ:  /* Not modeled */
        /*
         * These tables contain the backing store pages for the
         * interrupt fifos of the VC sub-engine in case of overflow.
         */
        break;

    default:
        g_assert_not_reached();
    }
}

/*
 * Both the PC and VC sub-engines are configured, as each uses the
 * Virtual Structure Tables: SBE, EAS, END and NVT.
 */
static void pnv_xive_vst_set_data(PnvXive *xive, uint64_t vsd, bool pc_engine)
{
    uint8_t mode = GETFIELD(VSD_MODE, vsd);
    uint8_t type = GETFIELD(VST_TABLE_SELECT,
                            xive->regs[VC_VSD_TABLE_ADDR >> 3]);
    uint8_t blk = GETFIELD(VST_TABLE_BLOCK,
                           xive->regs[VC_VSD_TABLE_ADDR >> 3]);
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    if (type > VST_TSEL_IRQ) {
        xive_error(xive, "VST: invalid table type %d", type);
        return;
    }

    if (blk >= vst_infos[type].max_blocks) {
        xive_error(xive, "VST: invalid block id %d for"
                      " %s table", blk, vst_infos[type].name);
        return;
    }

    /*
     * Only take the VC sub-engine configuration into account because
     * the XiveRouter model combines both VC and PC sub-engines
     */
    if (pc_engine) {
        return;
    }

    if (!vst_addr) {
        xive_error(xive, "VST: invalid %s table address", vst_infos[type].name);
        return;
    }

    switch (mode) {
    case VSD_MODE_FORWARD:
        xive->vsds[type][blk] = vsd;
        break;

    case VSD_MODE_EXCLUSIVE:
        pnv_xive_vst_set_exclusive(xive, type, blk, vsd);
        break;

    default:
        xive_error(xive, "VST: unsupported table mode %d", mode);
        return;
    }
}

/*
 * Interrupt controller MMIO region. The layout is compatible between
 * 4K and 64K pages:
 *
 * Page 0           sub-engine BARs
 *  0x000 - 0x3FF   IC registers
 *  0x400 - 0x7FF   PC registers
 *  0x800 - 0xFFF   VC registers
 *
 * Page 1           Notify page (writes only)
 *  0x000 - 0x7FF   HW interrupt triggers (PSI, PHB)
 *  0x800 - 0xFFF   forwards and syncs
 *
 * Page 2           LSI Trigger page (writes only) (not modeled)
 * Page 3           LSI SB EOI page (reads only) (not modeled)
 *
 * Page 4-7         indirect TIMA
 */

/*
 * IC - registers MMIO
 */
static void pnv_xive_ic_reg_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    MemoryRegion *sysmem = get_system_memory();
    uint32_t reg = offset >> 3;
    bool is_chip0 = xive->chip->chip_id == 0;

    switch (offset) {

    /*
     * XIVE CQ (PowerBus bridge) settings
     */
    case CQ_MSGSND:     /* msgsnd for doorbells */
    case CQ_FIRMASK_OR: /* FIR error reporting */
        break;
    case CQ_PBI_CTL:
        if (val & CQ_PBI_PC_64K) {
            xive->pc_shift = 16;
        }
        if (val & CQ_PBI_VC_64K) {
            xive->vc_shift = 16;
        }
        break;
    case CQ_CFG_PB_GEN: /* PowerBus General Configuration */
        /*
         * TODO: CQ_INT_ADDR_OPT for 1-block-per-chip mode
         */
        break;

    /*
     * XIVE Virtualization Controller settings
     */
    case VC_GLOBAL_CONFIG:
        break;

    /*
     * XIVE Presenter Controller settings
     */
    case PC_GLOBAL_CONFIG:
        /*
         * PC_GCONF_CHIPID_OVR
         *   Overrides Int command Chip ID with the Chip ID field (DEBUG)
         */
        break;
    case PC_TCTXT_CFG:
        /*
         * TODO: block group support
         */
        break;
    case PC_TCTXT_TRACK:
        /*
         * PC_TCTXT_TRACK_EN:
         *   enable block tracking and exchange of block ownership
         *   information between Interrupt controllers
         */
        break;

    /*
     * Misc settings
     */
    case VC_SBC_CONFIG: /* Store EOI configuration */
        /*
         * Configure store EOI if required by firmware (skiboot has removed
         * support recently though)
         */
        if (val & (VC_SBC_CONF_CPLX_CIST | VC_SBC_CONF_CIST_BOTH)) {
            xive->ipi_source.esb_flags |= XIVE_SRC_STORE_EOI;
        }
        break;

    case VC_EQC_CONFIG: /* TODO: silent escalation */
    case VC_AIB_TX_ORDER_TAG2: /* relax ordering */
        break;

    /*
     * XIVE BAR settings (XSCOM only)
     */
    case CQ_RST_CTL:
        /* bit4: resets all BAR registers */
        break;

    case CQ_IC_BAR: /* IC BAR. 8 pages */
        xive->ic_shift = val & CQ_IC_BAR_64K ? 16 : 12;
        if (!(val & CQ_IC_BAR_VALID)) {
            xive->ic_base = 0;
            if (xive->regs[reg] & CQ_IC_BAR_VALID) {
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_reg_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_notify_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_lsi_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->tm_indirect_mmio);

                memory_region_del_subregion(sysmem, &xive->ic_mmio);
            }
        } else {
            xive->ic_base = val & ~(CQ_IC_BAR_VALID | CQ_IC_BAR_64K);
            if (!(xive->regs[reg] & CQ_IC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->ic_base,
                                            &xive->ic_mmio);

                memory_region_add_subregion(&xive->ic_mmio, 0,
                                            &xive->ic_reg_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            1ul << xive->ic_shift,
                                            &xive->ic_notify_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            2ul << xive->ic_shift,
                                            &xive->ic_lsi_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            4ull << xive->ic_shift,
                                            &xive->tm_indirect_mmio);
            }
        }
        break;

    case CQ_TM1_BAR: /* TM BAR. 4 pages. Map only once */
    case CQ_TM2_BAR: /* second TM BAR. for hotplug. Not modeled */
        xive->tm_shift = val & CQ_TM_BAR_64K ? 16 : 12;
        if (!(val & CQ_TM_BAR_VALID)) {
            xive->tm_base = 0;
            if (xive->regs[reg] & CQ_TM_BAR_VALID && is_chip0) {
                memory_region_del_subregion(sysmem, &xive->tm_mmio);
            }
        } else {
            xive->tm_base = val & ~(CQ_TM_BAR_VALID | CQ_TM_BAR_64K);
            if (!(xive->regs[reg] & CQ_TM_BAR_VALID) && is_chip0) {
                memory_region_add_subregion(sysmem, xive->tm_base,
                                            &xive->tm_mmio);
            }
        }
        break;

    case CQ_PC_BARM:
        xive->regs[reg] = val;
        memory_region_set_size(&xive->pc_mmio, pnv_xive_pc_size(xive));
        break;
    case CQ_PC_BAR: /* From 32M to 512G */
        if (!(val & CQ_PC_BAR_VALID)) {
            xive->pc_base = 0;
            if (xive->regs[reg] & CQ_PC_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->pc_mmio);
            }
        } else {
            xive->pc_base = val & ~(CQ_PC_BAR_VALID);
            if (!(xive->regs[reg] & CQ_PC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->pc_base,
                                            &xive->pc_mmio);
            }
        }
        break;

    case CQ_VC_BARM:
        xive->regs[reg] = val;
        memory_region_set_size(&xive->vc_mmio, pnv_xive_vc_size(xive));
        break;
    case CQ_VC_BAR: /* From 64M to 4TB */
        if (!(val & CQ_VC_BAR_VALID)) {
            xive->vc_base = 0;
            if (xive->regs[reg] & CQ_VC_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->vc_mmio);
            }
        } else {
            xive->vc_base = val & ~(CQ_VC_BAR_VALID);
            if (!(xive->regs[reg] & CQ_VC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->vc_base,
                                            &xive->vc_mmio);
            }
        }
        break;

    /*
     * XIVE Table settings.
     */
    case CQ_TAR: /* Table Address */
        break;
    case CQ_TDR: /* Table Data */
        pnv_xive_table_set_data(xive, val);
        break;

    /*
     * XIVE VC & PC Virtual Structure Table settings
     */
    case VC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_ADDR: /* Virtual table selector */
        break;
    case VC_VSD_TABLE_DATA: /* Virtual table setting */
    case PC_VSD_TABLE_DATA:
        pnv_xive_vst_set_data(xive, val, offset == PC_VSD_TABLE_DATA);
        break;

    /*
     * Interrupt fifo overflow in memory backing store (Not modeled)
     */
    case VC_IRQ_CONFIG_IPI:
    case VC_IRQ_CONFIG_HW:
    case VC_IRQ_CONFIG_CASCADE1:
    case VC_IRQ_CONFIG_CASCADE2:
    case VC_IRQ_CONFIG_REDIST:
    case VC_IRQ_CONFIG_IPI_CASC:
        break;

    /*
     * XIVE hardware thread enablement
     */
    case PC_THREAD_EN_REG0: /* Physical Thread Enable */
    case PC_THREAD_EN_REG1: /* Physical Thread Enable (fused core) */
        break;

    case PC_THREAD_EN_REG0_SET:
        xive->regs[PC_THREAD_EN_REG0 >> 3] |= val;
        break;
    case PC_THREAD_EN_REG1_SET:
        xive->regs[PC_THREAD_EN_REG1 >> 3] |= val;
        break;
    case PC_THREAD_EN_REG0_CLR:
        xive->regs[PC_THREAD_EN_REG0 >> 3] &= ~val;
        break;
    case PC_THREAD_EN_REG1_CLR:
        xive->regs[PC_THREAD_EN_REG1 >> 3] &= ~val;
        break;

    /*
     * Indirect TIMA access set up. Defines the PIR of the HW thread
     * to use.
     */
    case PC_TCTXT_INDIR0 ... PC_TCTXT_INDIR3:
        break;

    /*
     * XIVE PC & VC cache updates for EAS, NVT and END
     */
    case VC_IVC_SCRUB_MASK:
    case VC_IVC_SCRUB_TRIG:
        break;

    case VC_EQC_CWATCH_SPEC:
        val &= ~VC_EQC_CWATCH_CONFLICT; /* HW resets this bit */
        break;
    case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
        break;
    case VC_EQC_CWATCH_DAT0:
        /* writing to DATA0 triggers the cache write */
        xive->regs[reg] = val;
        pnv_xive_end_update(xive);
        break;
    case VC_EQC_SCRUB_MASK:
    case VC_EQC_SCRUB_TRIG:
        /*
         * The scrubbing registers flush the cache in RAM and can also
         * invalidate.
         */
        break;

    case PC_VPC_CWATCH_SPEC:
        val &= ~PC_VPC_CWATCH_CONFLICT; /* HW resets this bit */
        break;
    case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
        break;
    case PC_VPC_CWATCH_DAT0:
        /* writing to DATA0 triggers the cache write */
        xive->regs[reg] = val;
        pnv_xive_nvt_update(xive);
        break;
    case PC_VPC_SCRUB_MASK:
    case PC_VPC_SCRUB_TRIG:
        /*
         * The scrubbing registers flush the cache in RAM and can also
         * invalidate.
         */
        break;

    /*
     * XIVE PC & VC cache invalidation
     */
    case PC_AT_KILL:
        break;
    case VC_AT_MACRO_KILL:
        break;
    case PC_AT_KILL_MASK:
    case VC_AT_MACRO_KILL_MASK:
        break;

    default:
        xive_error(xive, "IC: invalid write to reg=0x%"HWADDR_PRIx, offset);
        return;
    }

    xive->regs[reg] = val;
}

static uint64_t pnv_xive_ic_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t val = 0;
    uint32_t reg = offset >> 3;

    switch (offset) {
    case CQ_CFG_PB_GEN:
    case CQ_IC_BAR:
    case CQ_TM1_BAR:
    case CQ_TM2_BAR:
    case CQ_PC_BAR:
    case CQ_PC_BARM:
    case CQ_VC_BAR:
    case CQ_VC_BARM:
    case CQ_TAR:
    case CQ_TDR:
    case CQ_PBI_CTL:

    case PC_TCTXT_CFG:
    case PC_TCTXT_TRACK:
    case PC_TCTXT_INDIR0:
    case PC_TCTXT_INDIR1:
    case PC_TCTXT_INDIR2:
    case PC_TCTXT_INDIR3:
    case PC_GLOBAL_CONFIG:

    case PC_VPC_SCRUB_MASK:

    case VC_GLOBAL_CONFIG:
    case VC_AIB_TX_ORDER_TAG2:

    case VC_IRQ_CONFIG_IPI:
    case VC_IRQ_CONFIG_HW:
    case VC_IRQ_CONFIG_CASCADE1:
    case VC_IRQ_CONFIG_CASCADE2:
    case VC_IRQ_CONFIG_REDIST:
    case VC_IRQ_CONFIG_IPI_CASC:

    case VC_EQC_SCRUB_MASK:
    case VC_IVC_SCRUB_MASK:
    case VC_SBC_CONFIG:
    case VC_AT_MACRO_KILL_MASK:
    case VC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_ADDR:
    case VC_VSD_TABLE_DATA:
    case PC_VSD_TABLE_DATA:
    case PC_THREAD_EN_REG0:
    case PC_THREAD_EN_REG1:
        val = xive->regs[reg];
        break;

    /*
     * XIVE hardware thread enablement
     */
    case PC_THREAD_EN_REG0_SET:
    case PC_THREAD_EN_REG0_CLR:
        val = xive->regs[PC_THREAD_EN_REG0 >> 3];
        break;
    case PC_THREAD_EN_REG1_SET:
    case PC_THREAD_EN_REG1_CLR:
        val = xive->regs[PC_THREAD_EN_REG1 >> 3];
        break;

    case CQ_MSGSND: /* Identifies which cores have msgsnd enabled. */
        val = 0xffffff0000000000;
        break;

    /*
     * XIVE PC & VC cache updates for EAS, NVT and END
     */
    case VC_EQC_CWATCH_SPEC:
        xive->regs[reg] = ~(VC_EQC_CWATCH_FULL | VC_EQC_CWATCH_CONFLICT);
        val = xive->regs[reg];
        break;
    case VC_EQC_CWATCH_DAT0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        pnv_xive_end_cache_load(xive);
        val = xive->regs[reg];
        break;
    case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
        val = xive->regs[reg];
        break;

    case PC_VPC_CWATCH_SPEC:
        xive->regs[reg] = ~(PC_VPC_CWATCH_FULL | PC_VPC_CWATCH_CONFLICT);
        val = xive->regs[reg];
        break;
    case PC_VPC_CWATCH_DAT0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        pnv_xive_nvt_cache_load(xive);
        val = xive->regs[reg];
        break;
    case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
        val = xive->regs[reg];
        break;

    case PC_VPC_SCRUB_TRIG:
    case VC_IVC_SCRUB_TRIG:
    case VC_EQC_SCRUB_TRIG:
        xive->regs[reg] &= ~VC_SCRUB_VALID;
        val = xive->regs[reg];
        break;

    /*
     * XIVE PC & VC cache invalidation
     */
    case PC_AT_KILL:
        xive->regs[reg] &= ~PC_AT_KILL_VALID;
        val = xive->regs[reg];
        break;
    case VC_AT_MACRO_KILL:
        xive->regs[reg] &= ~VC_KILL_VALID;
        val = xive->regs[reg];
        break;

    /*
     * XIVE synchronisation
     */
    case VC_EQC_CONFIG:
        val = VC_EQC_SYNC_MASK;
        break;

    default:
        xive_error(xive, "IC: invalid read reg=0x%"HWADDR_PRIx, offset);
    }

    return val;
}

static const MemoryRegionOps pnv_xive_ic_reg_ops = {
    .read = pnv_xive_ic_reg_read,
    .write = pnv_xive_ic_reg_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * IC - Notify MMIO port page (write only)
 */
#define PNV_XIVE_FORWARD_IPI        0x800 /* Forward IPI */
#define PNV_XIVE_FORWARD_HW         0x880 /* Forward HW */
#define PNV_XIVE_FORWARD_OS_ESC     0x900 /* Forward OS escalation */
#define PNV_XIVE_FORWARD_HW_ESC     0x980 /* Forward Hyp escalation */
#define PNV_XIVE_FORWARD_REDIS      0xa00 /* Forward Redistribution */
#define PNV_XIVE_RESERVED5          0xa80 /* Cache line 5 PowerBUS operation */
#define PNV_XIVE_RESERVED6          0xb00 /* Cache line 6 PowerBUS operation */
#define PNV_XIVE_RESERVED7          0xb80 /* Cache line 7 PowerBUS operation */

/* VC synchronisation */
#define PNV_XIVE_SYNC_IPI           0xc00 /* Sync IPI */
#define PNV_XIVE_SYNC_HW            0xc80 /* Sync HW */
#define PNV_XIVE_SYNC_OS_ESC        0xd00 /* Sync OS escalation */
#define PNV_XIVE_SYNC_HW_ESC        0xd80 /* Sync Hyp escalation */
#define PNV_XIVE_SYNC_REDIS         0xe00 /* Sync Redistribution */

/* PC synchronisation */
#define PNV_XIVE_SYNC_PULL          0xe80 /* Sync pull context */
#define PNV_XIVE_SYNC_PUSH          0xf00 /* Sync push context */
#define PNV_XIVE_SYNC_VPC           0xf80 /* Sync remove VPC store */

static void pnv_xive_end_notify(XiveRouter *xrtr, XiveEAS *eas)
{
    PnvXive *xive = PNV_XIVE(xrtr);
    uint8_t end_blk = xive_get_field64(EAS_END_BLOCK, eas->w);
    uint32_t end_idx = xive_get_field64(EAS_END_INDEX, eas->w);
    uint32_t end_data = xive_get_field64(EAS_END_DATA, eas->w);
    uint64_t end_vsd = xive->vsds[VST_TSEL_EQDT][end_blk];

    switch (GETFIELD(VSD_MODE, end_vsd)) {
    case VSD_MODE_EXCLUSIVE:
        /* Perform the END notification on the local IC. */
        xive_router_end_notify(xrtr, eas);
        break;

    case VSD_MODE_FORWARD: {
        MemTxResult result;
        uint64_t notif_port = end_vsd & VSD_ADDRESS_MASK;
        uint64_t data = XIVE_TRIGGER_END | XIVE_TRIGGER_PQ |
            be64_to_cpu(eas->w);

        /* Forward the store on the remote IC notify page. */
        address_space_stq_be(&address_space_memory, notif_port, data,
                             MEMTXATTRS_UNSPECIFIED, &result);
        if (result != MEMTX_OK) {
            xive_error(xive, "IC: Forward notif END %x/%x [%x] failed @%"
                       HWADDR_PRIx, end_blk, end_idx, end_data, notif_port);
            return;
        }
        break;
    }

    case VSD_MODE_INVALID:
    default:
        /* Set FIR */
        xive_error(xive, "IC: Invalid END VSD for block %x", end_blk);
        return;
    }
}

/*
 * The notify page can either be used to receive trigger events from
 * the HW controllers (PHB, PSI) or to reroute interrupts between
 * Interrupt controllers.
 */
static void pnv_xive_ic_hw_trigger(PnvXive *xive, hwaddr addr, uint64_t val)
{
    uint8_t blk;
    uint32_t idx;

    trace_pnv_xive_ic_hw_trigger(addr, val);

    if (val & XIVE_TRIGGER_END) {
        val = cpu_to_be64(val);
        pnv_xive_end_notify(XIVE_ROUTER(xive), (XiveEAS *) &val);
        return;
    }

    /*
     * Forward the source event notification directly to the Router.
     * The source interrupt number should already be correctly encoded
     * with the chip block id by the sending device (PHB, PSI).
     */
    blk = XIVE_EAS_BLOCK(val);
    idx = XIVE_EAS_INDEX(val);

    xive_router_notify(XIVE_NOTIFIER(xive), XIVE_EAS(blk, idx),
                       !!(val & XIVE_TRIGGER_PQ));
}
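
/*
 * Example (illustrative): a store coming from a remote IC (see
 * pnv_xive_end_notify) carries XIVE_TRIGGER_END and is routed straight
 * to the END layer, while a plain trigger from a PHB or PSI carries a
 * block-encoded source number and goes through the normal EAS lookup,
 * XIVE_TRIGGER_PQ indicating whether the PQ bits were already checked.
 */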

static void pnv_xive_ic_notify_write(void *opaque, hwaddr addr, uint64_t val,
                                     unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    /* VC: HW triggers */
    switch (addr) {
    case 0x000 ... 0x7FF:
        pnv_xive_ic_hw_trigger(opaque, addr, val);
        break;

    /* VC: Forwarded IRQs */
    case PNV_XIVE_FORWARD_IPI:
    case PNV_XIVE_FORWARD_HW:
    case PNV_XIVE_FORWARD_OS_ESC:
    case PNV_XIVE_FORWARD_HW_ESC:
    case PNV_XIVE_FORWARD_REDIS:
        /* TODO: forwarded IRQs. Should be like HW triggers */
        xive_error(xive, "IC: forwarded at @0x%"HWADDR_PRIx" IRQ 0x%"PRIx64,
                   addr, val);
        break;

    /* VC syncs */
    case PNV_XIVE_SYNC_IPI:
    case PNV_XIVE_SYNC_HW:
    case PNV_XIVE_SYNC_OS_ESC:
    case PNV_XIVE_SYNC_HW_ESC:
    case PNV_XIVE_SYNC_REDIS:
        break;

    /* PC syncs */
    case PNV_XIVE_SYNC_PULL:
    case PNV_XIVE_SYNC_PUSH:
    case PNV_XIVE_SYNC_VPC:
        break;

    default:
        xive_error(xive, "IC: invalid notify write @%"HWADDR_PRIx, addr);
    }
}

static uint64_t pnv_xive_ic_notify_read(void *opaque, hwaddr addr,
                                        unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    /* loads are invalid */
    xive_error(xive, "IC: invalid notify read @%"HWADDR_PRIx, addr);
    return -1;
}

static const MemoryRegionOps pnv_xive_ic_notify_ops = {
    .read = pnv_xive_ic_notify_read,
    .write = pnv_xive_ic_notify_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * IC - LSI MMIO handlers (not modeled)
 */

static void pnv_xive_ic_lsi_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "IC: LSI invalid write @%"HWADDR_PRIx, addr);
}

static uint64_t pnv_xive_ic_lsi_read(void *opaque, hwaddr addr, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "IC: LSI invalid read @%"HWADDR_PRIx, addr);
    return -1;
}

static const MemoryRegionOps pnv_xive_ic_lsi_ops = {
    .read = pnv_xive_ic_lsi_read,
    .write = pnv_xive_ic_lsi_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * IC - Indirect TIMA MMIO handlers
 */

/*
 * When the TIMA is accessed from the indirect page, the thread id of
 * the target CPU is configured in the PC_TCTXT_INDIR0 register before
 * the access. This is used for resets and also for debug purposes.
 */
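
/*
 * A sketch of the expected programming sequence, under the assumption
 * that FW drives it the way the model implies (values illustrative):
 *
 *   1. regs[PC_TCTXT_INDIR0] = PC_TCTXT_INDIR_VALID |
 *          SETFIELD(PC_TCTXT_INDIR_THRDID, 0ull, thread_id);
 *   2. load from or store to the indirect TIMA page, serviced by the
 *      handlers below on behalf of 'thread_id';
 *   3. clear PC_TCTXT_INDIR_VALID when the access sequence is done.
 */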
static XiveTCTX *pnv_xive_get_indirect_tctx(PnvXive *xive)
{
    PnvChip *chip = xive->chip;
    uint64_t tctxt_indir = xive->regs[PC_TCTXT_INDIR0 >> 3];
    PowerPCCPU *cpu = NULL;
    int pir;

    if (!(tctxt_indir & PC_TCTXT_INDIR_VALID)) {
        xive_error(xive, "IC: no indirect TIMA access in progress");
        return NULL;
    }

    pir = (chip->chip_id << 8) | GETFIELD(PC_TCTXT_INDIR_THRDID, tctxt_indir);
    cpu = pnv_chip_find_cpu(chip, pir);
    if (!cpu) {
        xive_error(xive, "IC: invalid PIR %x for indirect access", pir);
        return NULL;
    }

    /* Check that the HW thread is XIVE enabled */
    if (!pnv_xive_is_cpu_enabled(xive, cpu)) {
        xive_error(xive, "IC: CPU %x is not enabled", pir);
    }

    return XIVE_TCTX(pnv_cpu_state(cpu)->intc);
}

static void xive_tm_indirect_write(void *opaque, hwaddr offset,
                                   uint64_t value, unsigned size)
{
    XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));

    /* An error has already been logged when no target is configured */
    if (!tctx) {
        return;
    }

    xive_tctx_tm_write(XIVE_PRESENTER(opaque), tctx, offset, value, size);
}

static uint64_t xive_tm_indirect_read(void *opaque, hwaddr offset,
                                      unsigned size)
{
    XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));

    /* An error has already been logged when no target is configured */
    if (!tctx) {
        return -1;
    }

    return xive_tctx_tm_read(XIVE_PRESENTER(opaque), tctx, offset, size);
}

static const MemoryRegionOps xive_tm_indirect_ops = {
    .read = xive_tm_indirect_read,
    .write = xive_tm_indirect_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static void pnv_xive_tm_write(void *opaque, hwaddr offset,
                              uint64_t value, unsigned size)
{
    PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
    PnvXive *xive = pnv_xive_tm_get_xive(cpu);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

    xive_tctx_tm_write(XIVE_PRESENTER(xive), tctx, offset, value, size);
}

static uint64_t pnv_xive_tm_read(void *opaque, hwaddr offset, unsigned size)
{
    PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
    PnvXive *xive = pnv_xive_tm_get_xive(cpu);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

    return xive_tctx_tm_read(XIVE_PRESENTER(xive), tctx, offset, size);
}

const MemoryRegionOps pnv_xive_tm_ops = {
    .read = pnv_xive_tm_read,
    .write = pnv_xive_tm_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

/*
 * Interrupt controller XSCOM region.
 */
static uint64_t pnv_xive_xscom_read(void *opaque, hwaddr addr, unsigned size)
{
    switch (addr >> 3) {
    case X_VC_EQC_CONFIG:
        /* FIXME (skiboot): This is the only XSCOM load. Bizarre. */
        return VC_EQC_SYNC_MASK;
    default:
        return pnv_xive_ic_reg_read(opaque, addr, size);
    }
}

static void pnv_xive_xscom_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
    pnv_xive_ic_reg_write(opaque, addr, val, size);
}

static const MemoryRegionOps pnv_xive_xscom_ops = {
    .read = pnv_xive_xscom_read,
    .write = pnv_xive_xscom_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    }
};

/*
 * Virtualization Controller MMIO region containing the IPI and END ESB pages
 */
static uint64_t pnv_xive_vc_read(void *opaque, hwaddr offset,
                                 unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
    uint64_t edt_type = 0;
    uint64_t edt_offset;
    MemTxResult result;
    AddressSpace *edt_as = NULL;
    uint64_t ret = -1;

    if (edt_index < XIVE_TABLE_EDT_MAX) {
        edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
    }

    switch (edt_type) {
    case CQ_TDR_EDT_IPI:
        edt_as = &xive->ipi_as;
        break;
    case CQ_TDR_EDT_EQ:
        edt_as = &xive->end_as;
        break;
    default:
        xive_error(xive, "VC: invalid EDT type for read @%"HWADDR_PRIx, offset);
        return -1;
    }

    /* Remap the offset for the targeted address space */
    edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);

    ret = address_space_ldq(edt_as, edt_offset, MEMTXATTRS_UNSPECIFIED,
                            &result);

    if (result != MEMTX_OK) {
        xive_error(xive, "VC: %s read failed at @0x%"HWADDR_PRIx " -> @0x%"
                   HWADDR_PRIx, edt_type == CQ_TDR_EDT_IPI ? "IPI" : "END",
                   offset, edt_offset);
        return -1;
    }

    return ret;
}

static void pnv_xive_vc_write(void *opaque, hwaddr offset,
                              uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
    uint64_t edt_type = 0;
    uint64_t edt_offset;
    MemTxResult result;
    AddressSpace *edt_as = NULL;

    if (edt_index < XIVE_TABLE_EDT_MAX) {
        edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
    }

    switch (edt_type) {
    case CQ_TDR_EDT_IPI:
        edt_as = &xive->ipi_as;
        break;
    case CQ_TDR_EDT_EQ:
        edt_as = &xive->end_as;
        break;
    default:
        xive_error(xive, "VC: invalid EDT type for write @%"HWADDR_PRIx,
                   offset);
        return;
    }

    /* Remap the offset for the targeted address space */
    edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);

    address_space_stq(edt_as, edt_offset, val, MEMTXATTRS_UNSPECIFIED, &result);
    if (result != MEMTX_OK) {
        xive_error(xive, "VC: write failed at @0x%"HWADDR_PRIx, edt_offset);
    }
}
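
/*
 * EDT dispatch, illustrated. The segment size below is hypothetical
 * (it actually derives from pnv_xive_edt_shift()); the walk itself is
 * the one performed by pnv_xive_vc_read/write above:
 *
 *   with a 256M segment (edt_shift = 28), an access at VC offset
 *   0x10000008 yields edt_index 1; if xive->edt[1] carries
 *   CQ_TDR_EDT_TYPE == CQ_TDR_EDT_EQ, the access is replayed at the
 *   remapped offset in the END address space (end_as), otherwise,
 *   for CQ_TDR_EDT_IPI, in the IPI one (ipi_as).
 */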

static const MemoryRegionOps pnv_xive_vc_ops = {
    .read = pnv_xive_vc_read,
    .write = pnv_xive_vc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * Presenter Controller MMIO region. Points to the NVT sets.
 *
 * HW implements all possible mem ops to the underlying NVT structure
 * but QEMU does not need to be so precise. The model implementation
 * simply returns the RAM address of the NVT structure which is then
 * used by pnv_xive_vst_write/read to perform the RAM operation.
 */
static uint64_t pnv_xive_pc_read(void *opaque, hwaddr offset, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint32_t nvt_idx = offset >> xive->pc_shift;
    uint8_t blk = pnv_xive_block_id(xive); /* TODO: VDT -> block xlate */

    return pnv_xive_vst_addr(xive, VST_TSEL_VPDT, blk, nvt_idx);
}
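
/*
 * Worked example (values hypothetical): with pc_shift = 16, i.e. 64K
 * subpages, a load anywhere in the third subpage of the PC region
 * (offset 0x20000) yields nvt_idx = 2, and the model returns the
 * guest RAM address of NVT 2 in this chip's VPDT, as computed by
 * pnv_xive_vst_addr().
 */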

static void pnv_xive_pc_write(void *opaque, hwaddr addr,
                              uint64_t value, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "PC: invalid write @%"HWADDR_PRIx, addr);
}

static const MemoryRegionOps pnv_xive_pc_ops = {
    .read = pnv_xive_pc_read,
    .write = pnv_xive_pc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static void xive_nvt_pic_print_info(XiveNVT *nvt, uint32_t nvt_idx,
                                    GString *buf)
{
    uint8_t  eq_blk = xive_get_field32(NVT_W1_EQ_BLOCK, nvt->w1);
    uint32_t eq_idx = xive_get_field32(NVT_W1_EQ_INDEX, nvt->w1);

    if (!xive_nvt_is_valid(nvt)) {
        return;
    }

    g_string_append_printf(buf, "  %08x end:%02x/%04x IPB:%02x\n",
                           nvt_idx, eq_blk, eq_idx,
                           xive_get_field32(NVT_W4_IPB, nvt->w4));
}

void pnv_xive_pic_print_info(PnvXive *xive, GString *buf)
{
    XiveRouter *xrtr = XIVE_ROUTER(xive);
    uint8_t blk = pnv_xive_block_id(xive);
    uint8_t chip_id = xive->chip->chip_id;
    uint32_t srcno0 = XIVE_EAS(blk, 0);
    uint32_t nr_ipis = pnv_xive_nr_ipis(xive, blk);
    XiveEAS eas;
    XiveEND end;
    XiveNVT nvt;
    int i;
    uint64_t xive_nvt_per_subpage;

    g_string_append_printf(buf, "XIVE[%x] #%d Source %08x .. %08x\n",
                           chip_id, blk, srcno0, srcno0 + nr_ipis - 1);
    xive_source_pic_print_info(&xive->ipi_source, srcno0, buf);

    g_string_append_printf(buf, "XIVE[%x] #%d EAT %08x .. %08x\n",
                           chip_id, blk, srcno0, srcno0 + nr_ipis - 1);
    for (i = 0; i < nr_ipis; i++) {
        if (xive_router_get_eas(xrtr, blk, i, &eas)) {
            break;
        }
        if (!xive_eas_is_masked(&eas)) {
            xive_eas_pic_print_info(&eas, i, buf);
        }
    }

    g_string_append_printf(buf, "XIVE[%x] #%d ENDT\n", chip_id, blk);
    i = 0;
    while (!xive_router_get_end(xrtr, blk, i, &end)) {
        xive_end_pic_print_info(&end, i++, buf);
    }

    g_string_append_printf(buf, "XIVE[%x] #%d END Escalation EAT\n",
                           chip_id, blk);
    i = 0;
    while (!xive_router_get_end(xrtr, blk, i, &end)) {
        xive_end_eas_pic_print_info(&end, i++, buf);
    }

    g_string_append_printf(buf, "XIVE[%x] #%d NVTT %08x .. %08x\n",
                           chip_id, blk, 0, XIVE_NVT_COUNT - 1);
    xive_nvt_per_subpage = pnv_xive_vst_per_subpage(xive, VST_TSEL_VPDT);
    for (i = 0; i < XIVE_NVT_COUNT; i += xive_nvt_per_subpage) {
        while (!xive_router_get_nvt(xrtr, blk, i, &nvt)) {
            xive_nvt_pic_print_info(&nvt, i++, buf);
        }
    }
}

static void pnv_xive_reset(void *dev)
{
    PnvXive *xive = PNV_XIVE(dev);
    XiveSource *xsrc = &xive->ipi_source;
    XiveENDSource *end_xsrc = &xive->end_source;

    /* Default page size (should be changed to 64K at runtime) */
    xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;

    /* Clear subregions */
    if (memory_region_is_mapped(&xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->ipi_edt_mmio, &xsrc->esb_mmio);
    }

    if (memory_region_is_mapped(&xive->ipi_edt_mmio)) {
        memory_region_del_subregion(&xive->ipi_mmio, &xive->ipi_edt_mmio);
    }

    if (memory_region_is_mapped(&end_xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->end_edt_mmio, &end_xsrc->esb_mmio);
    }

    if (memory_region_is_mapped(&xive->end_edt_mmio)) {
        memory_region_del_subregion(&xive->end_mmio, &xive->end_edt_mmio);
    }
}

static void pnv_xive_init(Object *obj)
{
    PnvXive *xive = PNV_XIVE(obj);

    object_initialize_child(obj, "ipi_source", &xive->ipi_source,
                            TYPE_XIVE_SOURCE);
    object_initialize_child(obj, "end_source", &xive->end_source,
                            TYPE_XIVE_END_SOURCE);
}

/*
 * Maximum number of IRQs and ENDs supported by HW
 */
#define PNV_XIVE_NR_IRQS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
#define PNV_XIVE_NR_ENDS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
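
/*
 * Sizing sketch: each source (and each END) consumes a pair of ESB
 * pages, i.e. 1ull << XIVE_ESB_64K_2PAGE bytes of MMIO (128K with 64K
 * pages), so the limits above are the VC window size divided by 128K.
 * For a hypothetical 64G VC window, that would be 64G / 128K = 512K
 * entries.
 */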

static void pnv_xive_realize(DeviceState *dev, Error **errp)
{
    PnvXive *xive = PNV_XIVE(dev);
    PnvXiveClass *pxc = PNV_XIVE_GET_CLASS(dev);
    XiveSource *xsrc = &xive->ipi_source;
    XiveENDSource *end_xsrc = &xive->end_source;
    Error *local_err = NULL;

    pxc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    assert(xive->chip);

    /*
     * The XiveSource and XiveENDSource objects are realized with the
     * maximum allowed HW configuration. The ESB MMIO regions will be
     * resized dynamically when the controller is configured by the FW,
     * so that unprovisioned resources cannot be accessed.
     */
    object_property_set_int(OBJECT(xsrc), "nr-irqs", PNV_XIVE_NR_IRQS,
                            &error_fatal);
    object_property_set_link(OBJECT(xsrc), "xive", OBJECT(xive), &error_abort);
    if (!qdev_realize(DEVICE(xsrc), NULL, errp)) {
        return;
    }

    object_property_set_int(OBJECT(end_xsrc), "nr-ends", PNV_XIVE_NR_ENDS,
                            &error_fatal);
    object_property_set_link(OBJECT(end_xsrc), "xive", OBJECT(xive),
                             &error_abort);
    if (!qdev_realize(DEVICE(end_xsrc), NULL, errp)) {
        return;
    }

    /* Default page size. Generally changed at runtime to 64K */
    xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;

    /* XSCOM region, used for initial configuration of the BARs */
    memory_region_init_io(&xive->xscom_regs, OBJECT(dev), &pnv_xive_xscom_ops,
                          xive, "xscom-xive", PNV9_XSCOM_XIVE_SIZE << 3);

    /* Interrupt controller MMIO regions */
    memory_region_init(&xive->ic_mmio, OBJECT(dev), "xive-ic",
                       PNV9_XIVE_IC_SIZE);

    memory_region_init_io(&xive->ic_reg_mmio, OBJECT(dev), &pnv_xive_ic_reg_ops,
                          xive, "xive-ic-reg", 1 << xive->ic_shift);
    memory_region_init_io(&xive->ic_notify_mmio, OBJECT(dev),
                          &pnv_xive_ic_notify_ops,
                          xive, "xive-ic-notify", 1 << xive->ic_shift);
    xive->ic_notify_mmio.disable_reentrancy_guard = true;

    /* The Pervasive LSI trigger and EOI pages (not modeled) */
    memory_region_init_io(&xive->ic_lsi_mmio, OBJECT(dev), &pnv_xive_ic_lsi_ops,
                          xive, "xive-ic-lsi", 2 << xive->ic_shift);

    /* Thread Interrupt Management Area (Indirect) */
    memory_region_init_io(&xive->tm_indirect_mmio, OBJECT(dev),
                          &xive_tm_indirect_ops,
                          xive, "xive-tima-indirect", PNV9_XIVE_TM_SIZE);

    /*
     * Overall Virtualization Controller MMIO region containing the
     * IPI ESB pages and END ESB pages. The layout is defined by the
     * EDT "Domain table" and accesses are dispatched through a
     * dedicated address space for each page type.
     */
    memory_region_init_io(&xive->vc_mmio, OBJECT(xive), &pnv_xive_vc_ops, xive,
                          "xive-vc", PNV9_XIVE_VC_SIZE);

    memory_region_init(&xive->ipi_mmio, OBJECT(xive), "xive-vc-ipi",
                       PNV9_XIVE_VC_SIZE);
    address_space_init(&xive->ipi_as, &xive->ipi_mmio, "xive-vc-ipi");
    memory_region_init(&xive->end_mmio, OBJECT(xive), "xive-vc-end",
                       PNV9_XIVE_VC_SIZE);
    address_space_init(&xive->end_as, &xive->end_mmio, "xive-vc-end");

    /*
     * The MMIO windows exposing the IPI ESBs and the END ESBs in the
     * VC region. Their size is configured by the FW in the EDT table.
     */
    memory_region_init(&xive->ipi_edt_mmio, OBJECT(xive), "xive-vc-ipi-edt", 0);
    memory_region_init(&xive->end_edt_mmio, OBJECT(xive), "xive-vc-end-edt", 0);

    /* Presenter Controller MMIO region (not modeled) */
    memory_region_init_io(&xive->pc_mmio, OBJECT(xive), &pnv_xive_pc_ops, xive,
                          "xive-pc", PNV9_XIVE_PC_SIZE);
    xive->pc_mmio.disable_reentrancy_guard = true;

    /* Thread Interrupt Management Area (Direct) */
    memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &pnv_xive_tm_ops,
                          xive, "xive-tima", PNV9_XIVE_TM_SIZE);

    qemu_register_reset(pnv_xive_reset, dev);
}
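
/*
 * A simplified sketch of the resulting MMIO layout. The sub-regions
 * are only mapped later, when FW programs the BARs and the EDT
 * through the XSCOM interface, so this reflects the fully configured
 * state rather than what realize() itself maps:
 *
 *   IC BAR -> ic_mmio : ic_reg_mmio, ic_notify_mmio, ic_lsi_mmio,
 *                       tm_indirect_mmio
 *   VC BAR -> vc_mmio : dispatched via the EDT to ipi_as/end_as,
 *                       which hold ipi_edt_mmio/end_edt_mmio and the
 *                       sources' ESB regions
 *   PC BAR -> pc_mmio
 *   TM BAR -> tm_mmio
 */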

static int pnv_xive_dt_xscom(PnvXScomInterface *dev, void *fdt,
                             int xscom_offset)
{
    const char compat[] = "ibm,power9-xive-x";
    char *name;
    int offset;
    uint32_t lpc_pcba = PNV9_XSCOM_XIVE_BASE;
    uint32_t reg[] = {
        cpu_to_be32(lpc_pcba),
        cpu_to_be32(PNV9_XSCOM_XIVE_SIZE)
    };

    name = g_strdup_printf("xive@%x", lpc_pcba);
    offset = fdt_add_subnode(fdt, xscom_offset, name);
    _FDT(offset);
    g_free(name);

    _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg))));
    _FDT((fdt_setprop(fdt, offset, "compatible", compat,
                      sizeof(compat))));
    return 0;
}
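
/*
 * Sketch of the node this adds under the chip's xscom node (the unit
 * address is symbolic; the actual value is PNV9_XSCOM_XIVE_BASE):
 *
 *   xive@<pcba> {
 *       reg = <PNV9_XSCOM_XIVE_BASE PNV9_XSCOM_XIVE_SIZE>;
 *       compatible = "ibm,power9-xive-x";
 *   };
 */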

static Property pnv_xive_properties[] = {
    DEFINE_PROP_UINT64("ic-bar", PnvXive, ic_base, 0),
    DEFINE_PROP_UINT64("vc-bar", PnvXive, vc_base, 0),
    DEFINE_PROP_UINT64("pc-bar", PnvXive, pc_base, 0),
    DEFINE_PROP_UINT64("tm-bar", PnvXive, tm_base, 0),
    /* The PnvChip id identifies the XIVE interrupt controller. */
    DEFINE_PROP_LINK("chip", PnvXive, chip, TYPE_PNV_CHIP, PnvChip *),
    DEFINE_PROP_END_OF_LIST(),
};

static void pnv_xive_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass);
    XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass);
    XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);
    XivePresenterClass *xpc = XIVE_PRESENTER_CLASS(klass);
    PnvXiveClass *pxc = PNV_XIVE_CLASS(klass);

    xdc->dt_xscom = pnv_xive_dt_xscom;

    dc->desc = "PowerNV XIVE Interrupt Controller";
    /* Sets dc->realize to pnv_xive_realize and saves the parent's handler */
    device_class_set_parent_realize(dc, pnv_xive_realize,
                                    &pxc->parent_realize);
    device_class_set_props(dc, pnv_xive_properties);

    xrc->get_eas = pnv_xive_get_eas;
    xrc->get_pq = pnv_xive_get_pq;
    xrc->set_pq = pnv_xive_set_pq;
    xrc->get_end = pnv_xive_get_end;
    xrc->write_end = pnv_xive_write_end;
    xrc->get_nvt = pnv_xive_get_nvt;
    xrc->write_nvt = pnv_xive_write_nvt;
    xrc->get_block_id = pnv_xive_get_block_id;
    xrc->end_notify = pnv_xive_end_notify;

    xnc->notify = pnv_xive_notify;
    xpc->match_nvt = pnv_xive_match_nvt;
    xpc->get_config = pnv_xive_presenter_get_config;
}

static const TypeInfo pnv_xive_info = {
    .name          = TYPE_PNV_XIVE,
    .parent        = TYPE_XIVE_ROUTER,
    .instance_init = pnv_xive_init,
    .instance_size = sizeof(PnvXive),
    .class_init    = pnv_xive_class_init,
    .class_size    = sizeof(PnvXiveClass),
    .interfaces    = (InterfaceInfo[]) {
        { TYPE_PNV_XSCOM_INTERFACE },
        { }
    }
};

static void pnv_xive_register_types(void)
{
    type_register_static(&pnv_xive_info);
}

type_init(pnv_xive_register_types)