xref: /openbmc/qemu/hw/intc/xive.c (revision 503bb0b9)
1  /*
2   * QEMU PowerPC XIVE interrupt controller model
3   *
4   * Copyright (c) 2017-2018, IBM Corporation.
5   *
6   * This code is licensed under the GPL version 2 or later. See the
7   * COPYING file in the top-level directory.
8   */
9  
10  #include "qemu/osdep.h"
11  #include "qemu/log.h"
12  #include "qapi/error.h"
13  #include "target/ppc/cpu.h"
14  #include "sysemu/cpus.h"
15  #include "sysemu/dma.h"
16  #include "hw/qdev-properties.h"
17  #include "monitor/monitor.h"
18  #include "hw/ppc/xive.h"
19  #include "hw/ppc/xive_regs.h"
20  
21  /*
22   * XIVE Thread Interrupt Management context
23   */
24  
25  /*
26   * Convert a priority number to an Interrupt Pending Buffer (IPB)
27   * register, which indicates a pending interrupt at the priority
28   * corresponding to the bit number
29   */
30  static uint8_t priority_to_ipb(uint8_t priority)
31  {
32      return priority > XIVE_PRIORITY_MAX ?
33          0 : 1 << (XIVE_PRIORITY_MAX - priority);
34  }
35  
36  /*
37   * Convert an Interrupt Pending Buffer (IPB) register to a Pending
38   * Interrupt Priority Register (PIPR), which contains the priority of
39   * the most favored pending notification.
40   */
41  static uint8_t ipb_to_pipr(uint8_t ipb)
42  {
43      return ipb ? clz32((uint32_t)ipb << 24) : 0xff;
44  }
45  
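/*
 * Merge a newly notified priority into the IPB and recompute the
 * PIPR from the resulting set of pending bits.
 */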
46  static void ipb_update(uint8_t *regs, uint8_t priority)
47  {
48      regs[TM_IPB] |= priority_to_ipb(priority);
49      regs[TM_PIPR] = ipb_to_pipr(regs[TM_IPB]);
50  }
51  
52  static uint8_t exception_mask(uint8_t ring)
53  {
54      switch (ring) {
55      case TM_QW1_OS:
56          return TM_QW1_NSR_EO;
57      default:
58          g_assert_not_reached();
59      }
60  }
61  
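/*
 * Acknowledge the pending exception of a ring: latch the PIPR into
 * the CPPR, clear the corresponding IPB bit and the NSR exception
 * bit, and return the (NSR << 8 | CPPR) acknowledge value.
 */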
62  static uint64_t xive_tctx_accept(XiveTCTX *tctx, uint8_t ring)
63  {
64      uint8_t *regs = &tctx->regs[ring];
65      uint8_t nsr = regs[TM_NSR];
66      uint8_t mask = exception_mask(ring);
67  
68      qemu_irq_lower(tctx->output);
69  
70      if (regs[TM_NSR] & mask) {
71          uint8_t cppr = regs[TM_PIPR];
72  
73          regs[TM_CPPR] = cppr;
74  
75          /* Reset the pending buffer bit */
76          regs[TM_IPB] &= ~priority_to_ipb(cppr);
77          regs[TM_PIPR] = ipb_to_pipr(regs[TM_IPB]);
78  
79          /* Drop Exception bit */
80          regs[TM_NSR] &= ~mask;
81      }
82  
83      return (nsr << 8) | regs[TM_CPPR];
84  }
85  
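/*
 * Raise an exception on the ring output when the pending priority
 * (PIPR) is more favored (strictly lower) than the current CPPR.
 */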
86  static void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring)
87  {
88      uint8_t *regs = &tctx->regs[ring];
89  
90      if (regs[TM_PIPR] < regs[TM_CPPR]) {
91          regs[TM_NSR] |= exception_mask(ring);
92          qemu_irq_raise(tctx->output);
93      }
94  }
95  
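/*
 * Update the CPPR of a ring. Out-of-range priorities are mapped to
 * 0xff, the least favored value, before checking whether a pending
 * exception should now be raised.
 */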
96  static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr)
97  {
98      if (cppr > XIVE_PRIORITY_MAX) {
99          cppr = 0xff;
100      }
101  
102      tctx->regs[ring + TM_CPPR] = cppr;
103  
104      /* CPPR has changed, check if we need to raise a pending exception */
105      xive_tctx_notify(tctx, ring);
106  }
107  
108  /*
109   * XIVE Thread Interrupt Management Area (TIMA)
110   */
111  
112  /*
113   * Define an access map for each page of the TIMA that we will use in
114   * the memory region ops to filter values when doing loads and stores
115   * of raw register values
116   *
117   * Register accessibility bits:
118   *
119   *    0x0 - no access
120   *    0x1 - write only
121   *    0x2 - read only
122   *    0x3 - read/write
123   */
124  
125  static const uint8_t xive_tm_hw_view[] = {
126      /* QW-0 User */   3, 0, 0, 0,   0, 0, 0, 0,   3, 3, 3, 3,   0, 0, 0, 0,
127      /* QW-1 OS   */   3, 3, 3, 3,   3, 3, 0, 3,   3, 3, 3, 3,   0, 0, 0, 0,
128      /* QW-2 POOL */   0, 0, 3, 3,   0, 0, 0, 0,   3, 3, 3, 3,   0, 0, 0, 0,
129      /* QW-3 PHYS */   3, 3, 3, 3,   0, 3, 0, 3,   3, 0, 0, 3,   3, 3, 3, 0,
130  };
131  
132  static const uint8_t xive_tm_hv_view[] = {
133      /* QW-0 User */   3, 0, 0, 0,   0, 0, 0, 0,   3, 3, 3, 3,   0, 0, 0, 0,
134      /* QW-1 OS   */   3, 3, 3, 3,   3, 3, 0, 3,   3, 3, 3, 3,   0, 0, 0, 0,
135      /* QW-2 POOL */   0, 0, 3, 3,   0, 0, 0, 0,   0, 3, 3, 3,   0, 0, 0, 0,
136      /* QW-3 PHYS */   3, 3, 3, 3,   0, 3, 0, 3,   3, 0, 0, 3,   0, 0, 0, 0,
137  };
138  
139  static const uint8_t xive_tm_os_view[] = {
140      /* QW-0 User */   3, 0, 0, 0,   0, 0, 0, 0,   3, 3, 3, 3,   0, 0, 0, 0,
141      /* QW-1 OS   */   2, 3, 2, 2,   2, 2, 0, 2,   0, 0, 0, 0,   0, 0, 0, 0,
142      /* QW-2 POOL */   0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0,
143      /* QW-3 PHYS */   0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0,
144  };
145  
146  static const uint8_t xive_tm_user_view[] = {
147      /* QW-0 User */   3, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0,
148      /* QW-1 OS   */   0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0,
149      /* QW-2 POOL */   0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0,
150      /* QW-3 PHYS */   0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0,
151  };
152  
153  /*
154   * Overall TIMA access map for the thread interrupt management context
155   * registers
156   */
157  static const uint8_t *xive_tm_views[] = {
158      [XIVE_TM_HW_PAGE]   = xive_tm_hw_view,
159      [XIVE_TM_HV_PAGE]   = xive_tm_hv_view,
160      [XIVE_TM_OS_PAGE]   = xive_tm_os_view,
161      [XIVE_TM_USER_PAGE] = xive_tm_user_view,
162  };
163  
164  /*
165   * Computes a register access mask for a given offset in the TIMA
166   */
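/*
 * For instance, a 4-byte access at the start of the OS ring in the
 * OS view yields a read mask of 0xffffffff but a write mask of only
 * 0x00ff0000, since the CPPR is the only writable byte there.
 */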
167  static uint64_t xive_tm_mask(hwaddr offset, unsigned size, bool write)
168  {
169      uint8_t page_offset = (offset >> TM_SHIFT) & 0x3;
170      uint8_t reg_offset = offset & 0x3F;
171      uint8_t reg_mask = write ? 0x1 : 0x2;
172      uint64_t mask = 0x0;
173      int i;
174  
175      for (i = 0; i < size; i++) {
176          if (xive_tm_views[page_offset][reg_offset + i] & reg_mask) {
177              mask |= (uint64_t) 0xff << (8 * (size - i - 1));
178          }
179      }
180  
181      return mask;
182  }
183  
184  static void xive_tm_raw_write(XiveTCTX *tctx, hwaddr offset, uint64_t value,
185                                unsigned size)
186  {
187      uint8_t ring_offset = offset & 0x30;
188      uint8_t reg_offset = offset & 0x3F;
189      uint64_t mask = xive_tm_mask(offset, size, true);
190      int i;
191  
192      /*
193       * Only 4 or 8 byte stores are allowed and the User ring is
194       * excluded
195       */
196      if (size < 4 || !mask || ring_offset == TM_QW0_USER) {
197          qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA @%"
198                        HWADDR_PRIx"\n", offset);
199          return;
200      }
201  
202      /*
203       * Use the register offset for the raw values and filter out
204       * reserved values
205       */
206      for (i = 0; i < size; i++) {
207          uint8_t byte_mask = (mask >> (8 * (size - i - 1)));
208          if (byte_mask) {
209              tctx->regs[reg_offset + i] = (value >> (8 * (size - i - 1))) &
210                  byte_mask;
211          }
212      }
213  }
214  
215  static uint64_t xive_tm_raw_read(XiveTCTX *tctx, hwaddr offset, unsigned size)
216  {
217      uint8_t ring_offset = offset & 0x30;
218      uint8_t reg_offset = offset & 0x3F;
219      uint64_t mask = xive_tm_mask(offset, size, false);
220      uint64_t ret;
221      int i;
222  
223      /*
224       * Only 4 or 8 byte loads are allowed and the User ring is
225       * excluded
226       */
227      if (size < 4 || !mask || ring_offset == TM_QW0_USER) {
228          qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access at TIMA @%"
229                        HWADDR_PRIx"\n", offset);
230          return -1;
231      }
232  
233      /* Use the register offset for the raw values */
234      ret = 0;
235      for (i = 0; i < size; i++) {
236          ret |= (uint64_t) tctx->regs[reg_offset + i] << (8 * (size - i - 1));
237      }
238  
239      /* filter out reserved values */
240      return ret & mask;
241  }
242  
243  /*
244   * The TM context is mapped twice within each page. Stores and loads
245   * to the first mapping below 2K write and read the specified values
246   * without modification. The second mapping above 2K performs specific
247   * state changes (side effects) in addition to setting/returning the
248   * interrupt management area context of the processor thread.
249   */
250  static uint64_t xive_tm_ack_os_reg(XiveTCTX *tctx, hwaddr offset, unsigned size)
251  {
252      return xive_tctx_accept(tctx, TM_QW1_OS);
253  }
254  
255  static void xive_tm_set_os_cppr(XiveTCTX *tctx, hwaddr offset,
256                                  uint64_t value, unsigned size)
257  {
258      xive_tctx_set_cppr(tctx, TM_QW1_OS, value & 0xff);
259  }
260  
261  /*
262   * Adjust the IPB to allow a CPU to process event queues of other
263   * priorities during one physical interrupt cycle.
264   */
265  static void xive_tm_set_os_pending(XiveTCTX *tctx, hwaddr offset,
266                                     uint64_t value, unsigned size)
267  {
268      ipb_update(&tctx->regs[TM_QW1_OS], value & 0xff);
269      xive_tctx_notify(tctx, TM_QW1_OS);
270  }
271  
272  /*
273   * Define a mapping of "special" operations depending on the TIMA page
274   * offset and the size of the operation.
275   */
276  typedef struct XiveTmOp {
277      uint8_t  page_offset;
278      uint32_t op_offset;
279      unsigned size;
280      void     (*write_handler)(XiveTCTX *tctx, hwaddr offset, uint64_t value,
281                                unsigned size);
282      uint64_t (*read_handler)(XiveTCTX *tctx, hwaddr offset, unsigned size);
283  } XiveTmOp;
284  
285  static const XiveTmOp xive_tm_operations[] = {
286      /*
287       * MMIOs below 2K : raw values and special operations without side
288       * effects
289       */
290      { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR,   1, xive_tm_set_os_cppr, NULL },
291  
292      /* MMIOs above 2K : special operations with side effects */
293      { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG,     2, NULL, xive_tm_ack_os_reg },
294      { XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, xive_tm_set_os_pending, NULL },
295  };
296  
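/*
 * Look up a special TIMA operation for the given page, offset and
 * access size. Operations defined for a page also match accesses
 * made from a more privileged page.
 */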
297  static const XiveTmOp *xive_tm_find_op(hwaddr offset, unsigned size, bool write)
298  {
299      uint8_t page_offset = (offset >> TM_SHIFT) & 0x3;
300      uint32_t op_offset = offset & 0xFFF;
301      int i;
302  
303      for (i = 0; i < ARRAY_SIZE(xive_tm_operations); i++) {
304          const XiveTmOp *xto = &xive_tm_operations[i];
305  
306          /* Accesses done from a more privileged TIMA page are allowed */
307          if (xto->page_offset >= page_offset &&
308              xto->op_offset == op_offset &&
309              xto->size == size &&
310              ((write && xto->write_handler) || (!write && xto->read_handler))) {
311              return xto;
312          }
313      }
314      return NULL;
315  }
316  
317  /*
318   * TIMA MMIO handlers
319   */
320  static void xive_tm_write(void *opaque, hwaddr offset,
321                            uint64_t value, unsigned size)
322  {
323      XiveTCTX *tctx = xive_router_get_tctx(XIVE_ROUTER(opaque), current_cpu);
324      const XiveTmOp *xto;
325  
326      /*
327       * TODO: check V bit in Q[0-3]W2, check PTER bit associated with CPU
328       */
329  
330      /*
331       * First, check for special operations in the 2K region
332       */
333      if (offset & 0x800) {
334          xto = xive_tm_find_op(offset, size, true);
335          if (!xto) {
336              qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA "
337                            "@%"HWADDR_PRIx"\n", offset);
338          } else {
339              xto->write_handler(tctx, offset, value, size);
340          }
341          return;
342      }
343  
344      /*
345       * Then, for special operations in the region below 2K.
346       */
347      xto = xive_tm_find_op(offset, size, true);
348      if (xto) {
349          xto->write_handler(tctx, offset, value, size);
350          return;
351      }
352  
353      /*
354       * Finish with raw access to the register values
355       */
356      xive_tm_raw_write(tctx, offset, value, size);
357  }
358  
359  static uint64_t xive_tm_read(void *opaque, hwaddr offset, unsigned size)
360  {
361      XiveTCTX *tctx = xive_router_get_tctx(XIVE_ROUTER(opaque), current_cpu);
362      const XiveTmOp *xto;
363  
364      /*
365       * TODO: check V bit in Q[0-3]W2, check PTER bit associated with CPU
366       */
367  
368      /*
369       * First, check for special operations in the 2K region
370       */
371      if (offset & 0x800) {
372          xto = xive_tm_find_op(offset, size, false);
373          if (!xto) {
374              qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access to TIMA "
375                            "@%"HWADDR_PRIx"\n", offset);
376              return -1;
377          }
378          return xto->read_handler(tctx, offset, size);
379      }
380  
381      /*
382       * Then, for special operations in the region below 2K.
383       */
384      xto = xive_tm_find_op(offset, size, false);
385      if (xto) {
386          return xto->read_handler(tctx, offset, size);
387      }
388  
389      /*
390       * Finish with raw access to the register values
391       */
392      return xive_tm_raw_read(tctx, offset, size);
393  }
394  
395  const MemoryRegionOps xive_tm_ops = {
396      .read = xive_tm_read,
397      .write = xive_tm_write,
398      .endianness = DEVICE_BIG_ENDIAN,
399      .valid = {
400          .min_access_size = 1,
401          .max_access_size = 8,
402      },
403      .impl = {
404          .min_access_size = 1,
405          .max_access_size = 8,
406      },
407  };
408  
409  static inline uint32_t xive_tctx_word2(uint8_t *ring)
410  {
411      return *((uint32_t *) &ring[TM_WORD2]);
412  }
413  
414  static char *xive_tctx_ring_print(uint8_t *ring)
415  {
416      uint32_t w2 = xive_tctx_word2(ring);
417  
418      return g_strdup_printf("%02x   %02x  %02x    %02x   %02x  "
419                     "%02x  %02x   %02x  %08x",
420                     ring[TM_NSR], ring[TM_CPPR], ring[TM_IPB], ring[TM_LSMFB],
421                     ring[TM_ACK_CNT], ring[TM_INC], ring[TM_AGE], ring[TM_PIPR],
422                     be32_to_cpu(w2));
423  }
424  
425  static const char * const xive_tctx_ring_names[] = {
426      "USER", "OS", "POOL", "PHYS",
427  };
428  
429  void xive_tctx_pic_print_info(XiveTCTX *tctx, Monitor *mon)
430  {
431      int cpu_index = tctx->cs ? tctx->cs->cpu_index : -1;
432      int i;
433  
434      monitor_printf(mon, "CPU[%04x]:   QW   NSR CPPR IPB LSMFB ACK# INC AGE PIPR"
435                     "  W2\n", cpu_index);
436  
437      for (i = 0; i < XIVE_TM_RING_COUNT; i++) {
438          char *s = xive_tctx_ring_print(&tctx->regs[i * XIVE_TM_RING_SIZE]);
439          monitor_printf(mon, "CPU[%04x]: %4s    %s\n", cpu_index,
440                         xive_tctx_ring_names[i], s);
441          g_free(s);
442      }
443  }
444  
445  static void xive_tctx_reset(void *dev)
446  {
447      XiveTCTX *tctx = XIVE_TCTX(dev);
448  
449      memset(tctx->regs, 0, sizeof(tctx->regs));
450  
451      /* Set some defaults */
452      tctx->regs[TM_QW1_OS + TM_LSMFB] = 0xFF;
453      tctx->regs[TM_QW1_OS + TM_ACK_CNT] = 0xFF;
454      tctx->regs[TM_QW1_OS + TM_AGE] = 0xFF;
455  
456      /*
457       * Initialize PIPR to 0xFF to avoid phantom interrupts when the
458       * CPPR is first set.
459       */
460      tctx->regs[TM_QW1_OS + TM_PIPR] =
461          ipb_to_pipr(tctx->regs[TM_QW1_OS + TM_IPB]);
462  }
463  
464  static void xive_tctx_realize(DeviceState *dev, Error **errp)
465  {
466      XiveTCTX *tctx = XIVE_TCTX(dev);
467      PowerPCCPU *cpu;
468      CPUPPCState *env;
469      Object *obj;
470      Error *local_err = NULL;
471  
472      obj = object_property_get_link(OBJECT(dev), "cpu", &local_err);
473      if (!obj) {
474          error_propagate(errp, local_err);
475          error_prepend(errp, "required link 'cpu' not found: ");
476          return;
477      }
478  
479      cpu = POWERPC_CPU(obj);
480      tctx->cs = CPU(obj);
481  
482      env = &cpu->env;
483      switch (PPC_INPUT(env)) {
484      case PPC_FLAGS_INPUT_POWER7:
485          tctx->output = env->irq_inputs[POWER7_INPUT_INT];
486          break;
487  
488      default:
489          error_setg(errp, "XIVE interrupt controller does not support "
490                     "this CPU bus model");
491          return;
492      }
493  
494      qemu_register_reset(xive_tctx_reset, dev);
495  }
496  
497  static void xive_tctx_unrealize(DeviceState *dev, Error **errp)
498  {
499      qemu_unregister_reset(xive_tctx_reset, dev);
500  }
501  
502  static const VMStateDescription vmstate_xive_tctx = {
503      .name = TYPE_XIVE_TCTX,
504      .version_id = 1,
505      .minimum_version_id = 1,
506      .fields = (VMStateField[]) {
507          VMSTATE_BUFFER(regs, XiveTCTX),
508          VMSTATE_END_OF_LIST()
509      },
510  };
511  
512  static void xive_tctx_class_init(ObjectClass *klass, void *data)
513  {
514      DeviceClass *dc = DEVICE_CLASS(klass);
515  
516      dc->desc = "XIVE Interrupt Thread Context";
517      dc->realize = xive_tctx_realize;
518      dc->unrealize = xive_tctx_unrealize;
519      dc->vmsd = &vmstate_xive_tctx;
520  }
521  
522  static const TypeInfo xive_tctx_info = {
523      .name          = TYPE_XIVE_TCTX,
524      .parent        = TYPE_DEVICE,
525      .instance_size = sizeof(XiveTCTX),
526      .class_init    = xive_tctx_class_init,
527  };
528  
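/*
 * Create, attach and realize a XIVE thread interrupt context for
 * the given CPU object.
 */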
529  Object *xive_tctx_create(Object *cpu, XiveRouter *xrtr, Error **errp)
530  {
531      Error *local_err = NULL;
532      Object *obj;
533  
534      obj = object_new(TYPE_XIVE_TCTX);
535      object_property_add_child(cpu, TYPE_XIVE_TCTX, obj, &error_abort);
536      object_unref(obj);
537      object_property_add_const_link(obj, "cpu", cpu, &error_abort);
538      object_property_set_bool(obj, true, "realized", &local_err);
539      if (local_err) {
540          goto error;
541      }
542  
543      return obj;
544  
545  error:
546      object_unparent(obj);
547      error_propagate(errp, local_err);
548      return NULL;
549  }
550  
551  /*
552   * XIVE ESB helpers
553   */
554  
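/*
 * Set the PQ bits of an ESB entry and return the previous value.
 */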
555  static uint8_t xive_esb_set(uint8_t *pq, uint8_t value)
556  {
557      uint8_t old_pq = *pq & 0x3;
558  
559      *pq &= ~0x3;
560      *pq |= value & 0x3;
561  
562      return old_pq;
563  }
564  
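/*
 * PQ state transitions on a trigger:
 *
 *   00 (reset)   -> 10 (pending)  forward the notification
 *   10 (pending) -> 11 (queued)   coalesce
 *   11 (queued)  -> 11 (queued)   coalesce
 *   01 (off)     -> 01 (off)      drop
 */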
565  static bool xive_esb_trigger(uint8_t *pq)
566  {
567      uint8_t old_pq = *pq & 0x3;
568  
569      switch (old_pq) {
570      case XIVE_ESB_RESET:
571          xive_esb_set(pq, XIVE_ESB_PENDING);
572          return true;
573      case XIVE_ESB_PENDING:
574      case XIVE_ESB_QUEUED:
575          xive_esb_set(pq, XIVE_ESB_QUEUED);
576          return false;
577      case XIVE_ESB_OFF:
578          xive_esb_set(pq, XIVE_ESB_OFF);
579          return false;
580      default:
581          g_assert_not_reached();
582      }
583  }
584  
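/*
 * PQ state transitions on an EOI:
 *
 *   00 (reset)   -> 00 (reset)    nothing pending
 *   10 (pending) -> 00 (reset)    nothing left to forward
 *   11 (queued)  -> 10 (pending)  forward the queued notification
 *   01 (off)     -> 01 (off)      drop
 */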
585  static bool xive_esb_eoi(uint8_t *pq)
586  {
587      uint8_t old_pq = *pq & 0x3;
588  
589      switch (old_pq) {
590      case XIVE_ESB_RESET:
591      case XIVE_ESB_PENDING:
592          xive_esb_set(pq, XIVE_ESB_RESET);
593          return false;
594      case XIVE_ESB_QUEUED:
595          xive_esb_set(pq, XIVE_ESB_PENDING);
596          return true;
597      case XIVE_ESB_OFF:
598          xive_esb_set(pq, XIVE_ESB_OFF);
599          return false;
600      default:
601          g_assert_not_reached();
602      }
603  }
604  
605  /*
606   * XIVE Interrupt Source (or IVSE)
607   */
608  
609  uint8_t xive_source_esb_get(XiveSource *xsrc, uint32_t srcno)
610  {
611      assert(srcno < xsrc->nr_irqs);
612  
613      return xsrc->status[srcno] & 0x3;
614  }
615  
616  uint8_t xive_source_esb_set(XiveSource *xsrc, uint32_t srcno, uint8_t pq)
617  {
618      assert(srcno < xsrc->nr_irqs);
619  
620      return xive_esb_set(&xsrc->status[srcno], pq);
621  }
622  
623  /*
624   * Returns whether the event notification should be forwarded.
625   */
626  static bool xive_source_lsi_trigger(XiveSource *xsrc, uint32_t srcno)
627  {
628      uint8_t old_pq = xive_source_esb_get(xsrc, srcno);
629  
630      xsrc->status[srcno] |= XIVE_STATUS_ASSERTED;
631  
632      switch (old_pq) {
633      case XIVE_ESB_RESET:
634          xive_source_esb_set(xsrc, srcno, XIVE_ESB_PENDING);
635          return true;
636      default:
637          return false;
638      }
639  }
640  
641  /*
642   * Returns whether the event notification should be forwarded.
643   */
644  static bool xive_source_esb_trigger(XiveSource *xsrc, uint32_t srcno)
645  {
646      bool ret;
647  
648      assert(srcno < xsrc->nr_irqs);
649  
650      ret = xive_esb_trigger(&xsrc->status[srcno]);
651  
652      if (xive_source_irq_is_lsi(xsrc, srcno) &&
653          xive_source_esb_get(xsrc, srcno) == XIVE_ESB_QUEUED) {
654          qemu_log_mask(LOG_GUEST_ERROR,
655                        "XIVE: queued an event on LSI IRQ %d\n", srcno);
656      }
657  
658      return ret;
659  }
660  
661  /*
662   * Returns whether the event notification should be forwarded.
663   */
664  static bool xive_source_esb_eoi(XiveSource *xsrc, uint32_t srcno)
665  {
666      bool ret;
667  
668      assert(srcno < xsrc->nr_irqs);
669  
670      ret = xive_esb_eoi(&xsrc->status[srcno]);
671  
672      /*
673       * LSI sources do not set the Q bit but they can still be
674       * asserted, in which case we should forward a new event
675       * notification
676       */
677      if (xive_source_irq_is_lsi(xsrc, srcno) &&
678          xsrc->status[srcno] & XIVE_STATUS_ASSERTED) {
679          ret = xive_source_lsi_trigger(xsrc, srcno);
680      }
681  
682      return ret;
683  }
684  
685  /*
686   * Forward the source event notification to the Router
687   */
688  static void xive_source_notify(XiveSource *xsrc, int srcno)
689  {
690      XiveNotifierClass *xnc = XIVE_NOTIFIER_GET_CLASS(xsrc->xive);
691  
692      if (xnc->notify) {
693          xnc->notify(xsrc->xive, srcno);
694      }
695  }
696  
697  /*
698   * In a two-page ESB MMIO setting, the even page is the trigger page and
699   * the odd page is for management
700   */
701  static inline bool addr_is_even(hwaddr addr, uint32_t shift)
702  {
703      return !((addr >> shift) & 1);
704  }
705  
706  static inline bool xive_source_is_trigger_page(XiveSource *xsrc, hwaddr addr)
707  {
708      return xive_source_esb_has_2page(xsrc) &&
709          addr_is_even(addr, xsrc->esb_shift - 1);
710  }
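
/*
 * For example, with an esb_shift of 17 (2 x 64K pages), IRQ n is
 * assigned the even 64K page at n * 128K for triggers and the odd
 * page at n * 128K + 64K for management.
 */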
711  
712  /*
713   * ESB MMIO loads
714   *                      Trigger page    Management/EOI page
715   *
716   * ESB MMIO setting     2 pages         1 or 2 pages
717   *
718   * 0x000 .. 0x3FF       -1              EOI and return 0|1
719   * 0x400 .. 0x7FF       -1              EOI and return 0|1
720   * 0x800 .. 0xBFF       -1              return PQ
721   * 0xC00 .. 0xCFF       -1              return PQ and atomically PQ=00
722   * 0xD00 .. 0xDFF       -1              return PQ and atomically PQ=01
723   * 0xE00 .. 0xEFF       -1              return PQ and atomically PQ=10
724   * 0xF00 .. 0xFFF       -1              return PQ and atomically PQ=11
725   */
726  static uint64_t xive_source_esb_read(void *opaque, hwaddr addr, unsigned size)
727  {
728      XiveSource *xsrc = XIVE_SOURCE(opaque);
729      uint32_t offset = addr & 0xFFF;
730      uint32_t srcno = addr >> xsrc->esb_shift;
731      uint64_t ret = -1;
732  
733      /* In a two-page ESB MMIO setting, the trigger page should not be read */
734      if (xive_source_is_trigger_page(xsrc, addr)) {
735          qemu_log_mask(LOG_GUEST_ERROR,
736                        "XIVE: invalid load on IRQ %d trigger page at "
737                        "0x%"HWADDR_PRIx"\n", srcno, addr);
738          return -1;
739      }
740  
741      switch (offset) {
742      case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF:
743          ret = xive_source_esb_eoi(xsrc, srcno);
744  
745          /* Forward the source event notification for routing */
746          if (ret) {
747              xive_source_notify(xsrc, srcno);
748          }
749          break;
750  
751      case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF:
752          ret = xive_source_esb_get(xsrc, srcno);
753          break;
754  
755      case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
756      case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
757      case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
758      case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
759          ret = xive_source_esb_set(xsrc, srcno, (offset >> 8) & 0x3);
760          break;
761      default:
762          qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB load addr %x\n",
763                        offset);
764      }
765  
766      return ret;
767  }
768  
769  /*
770   * ESB MMIO stores
771   *                      Trigger page    Management/EOI page
772   *
773   * ESB MMIO setting     2 pages         1 or 2 pages
774   *
775   * 0x000 .. 0x3FF       Trigger         Trigger
776   * 0x400 .. 0x7FF       Trigger         EOI
777   * 0x800 .. 0xBFF       Trigger         undefined
778   * 0xC00 .. 0xCFF       Trigger         PQ=00
779   * 0xD00 .. 0xDFF       Trigger         PQ=01
780   * 0xE00 .. 0xEFF       Trigger         PQ=10
781   * 0xF00 .. 0xFFF       Trigger         PQ=11
782   */
783  static void xive_source_esb_write(void *opaque, hwaddr addr,
784                                    uint64_t value, unsigned size)
785  {
786      XiveSource *xsrc = XIVE_SOURCE(opaque);
787      uint32_t offset = addr & 0xFFF;
788      uint32_t srcno = addr >> xsrc->esb_shift;
789      bool notify = false;
790  
791      /* In a two-page ESB MMIO setting, the trigger page only triggers */
792      if (xive_source_is_trigger_page(xsrc, addr)) {
793          notify = xive_source_esb_trigger(xsrc, srcno);
794          goto out;
795      }
796  
797      switch (offset) {
798      case 0 ... 0x3FF:
799          notify = xive_source_esb_trigger(xsrc, srcno);
800          break;
801  
802      case XIVE_ESB_STORE_EOI ... XIVE_ESB_STORE_EOI + 0x3FF:
803          if (!(xsrc->esb_flags & XIVE_SRC_STORE_EOI)) {
804              qemu_log_mask(LOG_GUEST_ERROR,
805                            "XIVE: invalid Store EOI for IRQ %d\n", srcno);
806              return;
807          }
808  
809          notify = xive_source_esb_eoi(xsrc, srcno);
810          break;
811  
812      case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
813      case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
814      case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
815      case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
816          xive_source_esb_set(xsrc, srcno, (offset >> 8) & 0x3);
817          break;
818  
819      default:
820          qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB write addr %x\n",
821                        offset);
822          return;
823      }
824  
825  out:
826      /* Forward the source event notification for routing */
827      if (notify) {
828          xive_source_notify(xsrc, srcno);
829      }
830  }
831  
832  static const MemoryRegionOps xive_source_esb_ops = {
833      .read = xive_source_esb_read,
834      .write = xive_source_esb_write,
835      .endianness = DEVICE_BIG_ENDIAN,
836      .valid = {
837          .min_access_size = 8,
838          .max_access_size = 8,
839      },
840      .impl = {
841          .min_access_size = 8,
842          .max_access_size = 8,
843      },
844  };
845  
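/*
 * qemu_irq input handler of the source. LSIs follow the input level
 * (assert/deassert) while MSIs only trigger on a rising edge.
 */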
846  void xive_source_set_irq(void *opaque, int srcno, int val)
847  {
848      XiveSource *xsrc = XIVE_SOURCE(opaque);
849      bool notify = false;
850  
851      if (xive_source_irq_is_lsi(xsrc, srcno)) {
852          if (val) {
853              notify = xive_source_lsi_trigger(xsrc, srcno);
854          } else {
855              xsrc->status[srcno] &= ~XIVE_STATUS_ASSERTED;
856          }
857      } else {
858          if (val) {
859              notify = xive_source_esb_trigger(xsrc, srcno);
860          }
861      }
862  
863      /* Forward the source event notification for routing */
864      if (notify) {
865          xive_source_notify(xsrc, srcno);
866      }
867  }
868  
869  void xive_source_pic_print_info(XiveSource *xsrc, uint32_t offset, Monitor *mon)
870  {
871      int i;
872  
873      for (i = 0; i < xsrc->nr_irqs; i++) {
874          uint8_t pq = xive_source_esb_get(xsrc, i);
875  
876          if (pq == XIVE_ESB_OFF) {
877              continue;
878          }
879  
880          monitor_printf(mon, "  %08x %s %c%c%c\n", i + offset,
881                         xive_source_irq_is_lsi(xsrc, i) ? "LSI" : "MSI",
882                         pq & XIVE_ESB_VAL_P ? 'P' : '-',
883                         pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
884                         xsrc->status[i] & XIVE_STATUS_ASSERTED ? 'A' : ' ');
885      }
886  }
887  
888  static void xive_source_reset(void *dev)
889  {
890      XiveSource *xsrc = XIVE_SOURCE(dev);
891  
892      /* Do not clear the LSI bitmap */
893  
894      /* PQs are initialized to 0b01 (Q=1) which corresponds to "ints off" */
895      memset(xsrc->status, XIVE_ESB_OFF, xsrc->nr_irqs);
896  }
897  
898  static void xive_source_realize(DeviceState *dev, Error **errp)
899  {
900      XiveSource *xsrc = XIVE_SOURCE(dev);
901      Object *obj;
902      Error *local_err = NULL;
903  
904      obj = object_property_get_link(OBJECT(dev), "xive", &local_err);
905      if (!obj) {
906          error_propagate(errp, local_err);
907          error_prepend(errp, "required link 'xive' not found: ");
908          return;
909      }
910  
911      xsrc->xive = XIVE_NOTIFIER(obj);
912  
913      if (!xsrc->nr_irqs) {
914          error_setg(errp, "Number of interrupts needs to be greater than 0");
915          return;
916      }
917  
918      if (xsrc->esb_shift != XIVE_ESB_4K &&
919          xsrc->esb_shift != XIVE_ESB_4K_2PAGE &&
920          xsrc->esb_shift != XIVE_ESB_64K &&
921          xsrc->esb_shift != XIVE_ESB_64K_2PAGE) {
922          error_setg(errp, "Invalid ESB shift setting");
923          return;
924      }
925  
926      xsrc->status = g_malloc0(xsrc->nr_irqs);
927      xsrc->lsi_map = bitmap_new(xsrc->nr_irqs);
928  
929      memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc),
930                            &xive_source_esb_ops, xsrc, "xive.esb",
931                            (1ull << xsrc->esb_shift) * xsrc->nr_irqs);
932  
933      qemu_register_reset(xive_source_reset, dev);
934  }
935  
936  static const VMStateDescription vmstate_xive_source = {
937      .name = TYPE_XIVE_SOURCE,
938      .version_id = 1,
939      .minimum_version_id = 1,
940      .fields = (VMStateField[]) {
941          VMSTATE_UINT32_EQUAL(nr_irqs, XiveSource, NULL),
942          VMSTATE_VBUFFER_UINT32(status, XiveSource, 1, NULL, nr_irqs),
943          VMSTATE_END_OF_LIST()
944      },
945  };
946  
947  /*
948   * The default XIVE interrupt source setting for the ESB MMIOs is two
949   * 64k pages without Store EOI, to be in sync with KVM.
950   */
951  static Property xive_source_properties[] = {
952      DEFINE_PROP_UINT64("flags", XiveSource, esb_flags, 0),
953      DEFINE_PROP_UINT32("nr-irqs", XiveSource, nr_irqs, 0),
954      DEFINE_PROP_UINT32("shift", XiveSource, esb_shift, XIVE_ESB_64K_2PAGE),
955      DEFINE_PROP_END_OF_LIST(),
956  };
957  
958  static void xive_source_class_init(ObjectClass *klass, void *data)
959  {
960      DeviceClass *dc = DEVICE_CLASS(klass);
961  
962      dc->desc    = "XIVE Interrupt Source";
963      dc->props   = xive_source_properties;
964      dc->realize = xive_source_realize;
965      dc->vmsd    = &vmstate_xive_source;
966  }
967  
968  static const TypeInfo xive_source_info = {
969      .name          = TYPE_XIVE_SOURCE,
970      .parent        = TYPE_DEVICE,
971      .instance_size = sizeof(XiveSource),
972      .class_init    = xive_source_class_init,
973  };
974  
975  /*
976   * XiveEND helpers
977   */
978  
979  void xive_end_queue_pic_print_info(XiveEND *end, uint32_t width, Monitor *mon)
980  {
981      uint64_t qaddr_base = ((uint64_t) be32_to_cpu(end->w2) & 0x0fffffff) << 32
982          | be32_to_cpu(end->w3);
983      uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
984      uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
985      uint32_t qentries = 1 << (qsize + 10);
986      int i;
987  
988      /*
989       * print out the [ (qindex - (width - 1)) .. qindex ] window
990       */
991      monitor_printf(mon, " [ ");
992      qindex = (qindex - (width - 1)) & (qentries - 1);
993      for (i = 0; i < width; i++) {
994          uint64_t qaddr = qaddr_base + (qindex << 2);
995          uint32_t qdata = -1;
996  
997          if (dma_memory_read(&address_space_memory, qaddr, &qdata,
998                              sizeof(qdata))) {
999              qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to read EQ @0x%"
1000                            HWADDR_PRIx "\n", qaddr);
1001              return;
1002          }
1003          monitor_printf(mon, "%s%08x ", i == width - 1 ? "^" : "",
1004                         be32_to_cpu(qdata));
1005          qindex = (qindex + 1) & (qentries - 1);
1006      }
1007  }
1008  
1009  void xive_end_pic_print_info(XiveEND *end, uint32_t end_idx, Monitor *mon)
1010  {
1011      uint64_t qaddr_base = ((uint64_t) be32_to_cpu(end->w2) & 0x0fffffff) << 32
1012          | be32_to_cpu(end->w3);
1013      uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
1014      uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1);
1015      uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
1016      uint32_t qentries = 1 << (qsize + 10);
1017  
1018      uint32_t nvt = xive_get_field32(END_W6_NVT_INDEX, end->w6);
1019      uint8_t priority = xive_get_field32(END_W7_F0_PRIORITY, end->w7);
1020  
1021      if (!xive_end_is_valid(end)) {
1022          return;
1023      }
1024  
1025      monitor_printf(mon, "  %08x %c%c%c%c%c prio:%d nvt:%04x eq:@%08"PRIx64
1026                     "% 6d/%5d ^%d", end_idx,
1027                     xive_end_is_valid(end)    ? 'v' : '-',
1028                     xive_end_is_enqueue(end)  ? 'q' : '-',
1029                     xive_end_is_notify(end)   ? 'n' : '-',
1030                     xive_end_is_backlog(end)  ? 'b' : '-',
1031                     xive_end_is_escalate(end) ? 'e' : '-',
1032                     priority, nvt, qaddr_base, qindex, qentries, qgen);
1033  
1034      xive_end_queue_pic_print_info(end, 6, mon);
1035      monitor_printf(mon, "]\n");
1036  }
1037  
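/*
 * Add an event data word to the END queue, flipping the generation
 * bit when the write index wraps around.
 */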
1038  static void xive_end_enqueue(XiveEND *end, uint32_t data)
1039  {
1040      uint64_t qaddr_base = ((uint64_t) be32_to_cpu(end->w2) & 0x0fffffff) << 32
1041          | be32_to_cpu(end->w3);
1042      uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
1043      uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
1044      uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1);
1045  
1046      uint64_t qaddr = qaddr_base + (qindex << 2);
1047      uint32_t qdata = cpu_to_be32((qgen << 31) | (data & 0x7fffffff));
1048      uint32_t qentries = 1 << (qsize + 10);
1049  
1050      if (dma_memory_write(&address_space_memory, qaddr, &qdata, sizeof(qdata))) {
1051          qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to write END data @0x%"
1052                        HWADDR_PRIx "\n", qaddr);
1053          return;
1054      }
1055  
1056      qindex = (qindex + 1) & (qentries - 1);
1057      if (qindex == 0) {
1058          qgen ^= 1;
1059          end->w1 = xive_set_field32(END_W1_GENERATION, end->w1, qgen);
1060      }
1061      end->w1 = xive_set_field32(END_W1_PAGE_OFF, end->w1, qindex);
1062  }
1063  
1064  /*
1065   * XIVE Router (aka. Virtualization Controller or IVRE)
1066   */
1067  
1068  int xive_router_get_eas(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx,
1069                          XiveEAS *eas)
1070  {
1071      XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);
1072  
1073      return xrc->get_eas(xrtr, eas_blk, eas_idx, eas);
1074  }
1075  
1076  int xive_router_get_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx,
1077                          XiveEND *end)
1078  {
1079     XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);
1080  
1081     return xrc->get_end(xrtr, end_blk, end_idx, end);
1082  }
1083  
1084  int xive_router_write_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx,
1085                            XiveEND *end, uint8_t word_number)
1086  {
1087     XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);
1088  
1089     return xrc->write_end(xrtr, end_blk, end_idx, end, word_number);
1090  }
1091  
1092  int xive_router_get_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx,
1093                          XiveNVT *nvt)
1094  {
1095     XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);
1096  
1097     return xrc->get_nvt(xrtr, nvt_blk, nvt_idx, nvt);
1098  }
1099  
1100  int xive_router_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx,
1101                          XiveNVT *nvt, uint8_t word_number)
1102  {
1103     XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);
1104  
1105     return xrc->write_nvt(xrtr, nvt_blk, nvt_idx, nvt, word_number);
1106  }
1107  
1108  XiveTCTX *xive_router_get_tctx(XiveRouter *xrtr, CPUState *cs)
1109  {
1110      XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);
1111  
1112      return xrc->get_tctx(xrtr, cs);
1113  }
1114  
1115  /*
1116   * The thread context register words are in big-endian format.
1117   */
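/*
 * Match the CAM lines of a thread context against the NVT identifier
 * of the notification and return the matching ring, or -1 when the
 * NVT is not dispatched on this thread.
 */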
1118  static int xive_presenter_tctx_match(XiveTCTX *tctx, uint8_t format,
1119                                       uint8_t nvt_blk, uint32_t nvt_idx,
1120                                       bool cam_ignore, uint32_t logic_serv)
1121  {
1122      uint32_t cam = xive_nvt_cam_line(nvt_blk, nvt_idx);
1123      uint32_t qw2w2 = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]);
1124      uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);
1125      uint32_t qw0w2 = xive_tctx_word2(&tctx->regs[TM_QW0_USER]);
1126  
1127      /*
1128       * TODO (PowerNV): ignore mode. The low order bits of the NVT
1129       * identifier are ignored in the "CAM" match.
1130       */
1131  
1132      if (format == 0) {
1133          if (cam_ignore == true) {
1134              /*
1135               * F=0 & i=1: Logical server notification (bits ignored at
1136               * the end of the NVT identifier)
1137               */
1138              qemu_log_mask(LOG_UNIMP, "XIVE: no support for LS NVT %x/%x\n",
1139                            nvt_blk, nvt_idx);
1140              return -1;
1141          }
1142  
1143          /* F=0 & i=0: Specific NVT notification */
1144  
1145          /* TODO (PowerNV) : PHYS ring */
1146  
1147          /* HV POOL ring */
1148          if ((be32_to_cpu(qw2w2) & TM_QW2W2_VP) &&
1149              cam == xive_get_field32(TM_QW2W2_POOL_CAM, qw2w2)) {
1150              return TM_QW2_HV_POOL;
1151          }
1152  
1153          /* OS ring */
1154          if ((be32_to_cpu(qw1w2) & TM_QW1W2_VO) &&
1155              cam == xive_get_field32(TM_QW1W2_OS_CAM, qw1w2)) {
1156              return TM_QW1_OS;
1157          }
1158      } else {
1159          /* F=1 : User level Event-Based Branch (EBB) notification */
1160  
1161          /* USER ring */
1162          if  ((be32_to_cpu(qw1w2) & TM_QW1W2_VO) &&
1163               (cam == xive_get_field32(TM_QW1W2_OS_CAM, qw1w2)) &&
1164               (be32_to_cpu(qw0w2) & TM_QW0W2_VU) &&
1165               (logic_serv == xive_get_field32(TM_QW0W2_LOGIC_SERV, qw0w2))) {
1166              return TM_QW0_USER;
1167          }
1168      }
1169      return -1;
1170  }
1171  
1172  typedef struct XiveTCTXMatch {
1173      XiveTCTX *tctx;
1174      uint8_t ring;
1175  } XiveTCTXMatch;
1176  
1177  static bool xive_presenter_match(XiveRouter *xrtr, uint8_t format,
1178                                   uint8_t nvt_blk, uint32_t nvt_idx,
1179                                   bool cam_ignore, uint8_t priority,
1180                                   uint32_t logic_serv, XiveTCTXMatch *match)
1181  {
1182      CPUState *cs;
1183  
1184      /*
1185       * TODO (PowerNV): handle chip_id overwrite of block field for
1186       * hardwired CAM compares
1187       */
1188  
1189      CPU_FOREACH(cs) {
1190          XiveTCTX *tctx = xive_router_get_tctx(xrtr, cs);
1191          int ring;
1192  
1193          /*
1194           * HW checks that the CPU is enabled in the Physical Thread
1195           * Enable Register (PTER).
1196           */
1197  
1198          /*
1199           * Check the thread context CAM lines and record matches. We
1200           * will handle CPU exception delivery later
1201           */
1202          ring = xive_presenter_tctx_match(tctx, format, nvt_blk, nvt_idx,
1203                                           cam_ignore, logic_serv);
1204          /*
1205           * Save the context and carry on to catch duplicates, which we
1206           * don't support yet.
1207           */
1208          if (ring != -1) {
1209              if (match->tctx) {
1210                  qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a thread "
1211                                "context NVT %x/%x\n", nvt_blk, nvt_idx);
1212                  return false;
1213              }
1214  
1215              match->ring = ring;
1216              match->tctx = tctx;
1217          }
1218      }
1219  
1220      if (!match->tctx) {
1221          qemu_log_mask(LOG_UNIMP, "XIVE: NVT %x/%x is not dispatched\n",
1222                        nvt_blk, nvt_idx);
1223          return false;
1224      }
1225  
1226      return true;
1227  }
1228  
1229  /*
1230   * This is our simple Xive Presenter Engine model. It is merged in the
1231   * Router as it does not require an extra object.
1232   *
1233   * It receives notification requests sent by the IVRE to find one
1234   * matching NVT (or more) dispatched on the processor threads. In case
1235   * of a single NVT notification, the process is abbreviated and the
1236   * thread is signaled if a match is found. In case of a logical server
1237   * notification (bits ignored at the end of the NVT identifier), the
1238   * IVPE and IVRE select a winning thread using different filters. This
1239   * involves 2 or 3 exchanges on the PowerBus that the model does not
1240   * support.
1241   *
1242   * The parameters represent what is sent on the PowerBus
1243   */
1244  static void xive_presenter_notify(XiveRouter *xrtr, uint8_t format,
1245                                    uint8_t nvt_blk, uint32_t nvt_idx,
1246                                    bool cam_ignore, uint8_t priority,
1247                                    uint32_t logic_serv)
1248  {
1249      XiveNVT nvt;
1250      XiveTCTXMatch match = { .tctx = NULL, .ring = 0 };
1251      bool found;
1252  
1253      /* NVT cache lookup */
1254      if (xive_router_get_nvt(xrtr, nvt_blk, nvt_idx, &nvt)) {
1255          qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVT %x/%x\n",
1256                        nvt_blk, nvt_idx);
1257          return;
1258      }
1259  
1260      if (!xive_nvt_is_valid(&nvt)) {
1261          qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVT %x/%x is invalid\n",
1262                        nvt_blk, nvt_idx);
1263          return;
1264      }
1265  
1266      found = xive_presenter_match(xrtr, format, nvt_blk, nvt_idx, cam_ignore,
1267                                   priority, logic_serv, &match);
1268      if (found) {
1269          ipb_update(&match.tctx->regs[match.ring], priority);
1270          xive_tctx_notify(match.tctx, match.ring);
1271          return;
1272      }
1273  
1274      /* Record the IPB in the associated NVT structure */
1275      ipb_update((uint8_t *) &nvt.w4, priority);
1276      xive_router_write_nvt(xrtr, nvt_blk, nvt_idx, &nvt, 4);
1277  
1278      /*
1279       * If no matching NVT is dispatched on a HW thread :
1280       * - update the NVT structure if backlog is activated
1281       * - escalate (ESe PQ bits and EAS in w4-5) if escalation is
1282       *   activated
1283       */
1284  }
1285  
1286  /*
1287   * An END trigger can come from an event trigger (IPI or HW) or from
1288   * another chip. We don't model the PowerBus but the END trigger
1289   * message has the same parameters as the function below.
1290   */
1291  static void xive_router_end_notify(XiveRouter *xrtr, uint8_t end_blk,
1292                                     uint32_t end_idx, uint32_t end_data)
1293  {
1294      XiveEND end;
1295      uint8_t priority;
1296      uint8_t format;
1297  
1298      /* END cache lookup */
1299      if (xive_router_get_end(xrtr, end_blk, end_idx, &end)) {
1300          qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
1301                        end_idx);
1302          return;
1303      }
1304  
1305      if (!xive_end_is_valid(&end)) {
1306          qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
1307                        end_blk, end_idx);
1308          return;
1309      }
1310  
1311      if (xive_end_is_enqueue(&end)) {
1312          xive_end_enqueue(&end, end_data);
1313          /* Enqueuing event data modifies the EQ toggle and index */
1314          xive_router_write_end(xrtr, end_blk, end_idx, &end, 1);
1315      }
1316  
1317      /*
1318       * The W7 format depends on the F bit in W6. It defines the type
1319       * of the notification :
1320       *
1321       *   F=0 : single or multiple NVT notification
1322       *   F=1 : User level Event-Based Branch (EBB) notification, no
1323       *         priority
1324       */
1325      format = xive_get_field32(END_W6_FORMAT_BIT, end.w6);
1326      priority = xive_get_field32(END_W7_F0_PRIORITY, end.w7);
1327  
1328      /* The END is masked */
1329      if (format == 0 && priority == 0xff) {
1330          return;
1331      }
1332  
1333      /*
1334       * Check the END ESn (Event State Buffer for notification) for
1335       * even further coalescing in the Router
1336       */
1337      if (!xive_end_is_notify(&end)) {
1338          uint8_t pq = xive_get_field32(END_W1_ESn, end.w1);
1339          bool notify = xive_esb_trigger(&pq);
1340  
1341          if (pq != xive_get_field32(END_W1_ESn, end.w1)) {
1342              end.w1 = xive_set_field32(END_W1_ESn, end.w1, pq);
1343              xive_router_write_end(xrtr, end_blk, end_idx, &end, 1);
1344          }
1345  
1346          /* ESn[Q]=1 : end of notification */
1347          if (!notify) {
1348              return;
1349          }
1350      }
1351  
1352      /*
1353       * Follows IVPE notification
1354       */
1355      xive_presenter_notify(xrtr, format,
1356                            xive_get_field32(END_W6_NVT_BLOCK, end.w6),
1357                            xive_get_field32(END_W6_NVT_INDEX, end.w6),
1358                            xive_get_field32(END_W7_F0_IGNORE, end.w7),
1359                            priority,
1360                            xive_get_field32(END_W7_F1_LOG_SERVER_ID, end.w7));
1361  
1362      /* TODO: Auto EOI. */
1363  }
1364  
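/*
 * Entry point for the event notifications coming from the sources:
 * look up the EAS of the LISN and, unless it is masked, turn the
 * event trigger into an END trigger.
 */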
1365  static void xive_router_notify(XiveNotifier *xn, uint32_t lisn)
1366  {
1367      XiveRouter *xrtr = XIVE_ROUTER(xn);
1368      uint8_t eas_blk = XIVE_SRCNO_BLOCK(lisn);
1369      uint32_t eas_idx = XIVE_SRCNO_INDEX(lisn);
1370      XiveEAS eas;
1371  
1372      /* EAS cache lookup */
1373      if (xive_router_get_eas(xrtr, eas_blk, eas_idx, &eas)) {
1374          qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN %x\n", lisn);
1375          return;
1376      }
1377  
1378      /*
1379       * The IVRE checks the State Bit Cache at this point. We skip the
1380       * SBC lookup because the state bits of the sources are modeled
1381       * internally in QEMU.
1382       */
1383  
1384      if (!xive_eas_is_valid(&eas)) {
1385          qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid LISN %x\n", lisn);
1386          return;
1387      }
1388  
1389      if (xive_eas_is_masked(&eas)) {
1390          /* Notification completed */
1391          return;
1392      }
1393  
1394      /*
1395       * The event trigger becomes an END trigger
1396       */
1397      xive_router_end_notify(xrtr,
1398                             xive_get_field64(EAS_END_BLOCK, eas.w),
1399                             xive_get_field64(EAS_END_INDEX, eas.w),
1400                             xive_get_field64(EAS_END_DATA,  eas.w));
1401  }
1402  
1403  static void xive_router_class_init(ObjectClass *klass, void *data)
1404  {
1405      DeviceClass *dc = DEVICE_CLASS(klass);
1406      XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);
1407  
1408      dc->desc    = "XIVE Router Engine";
1409      xnc->notify = xive_router_notify;
1410  }
1411  
1412  static const TypeInfo xive_router_info = {
1413      .name          = TYPE_XIVE_ROUTER,
1414      .parent        = TYPE_SYS_BUS_DEVICE,
1415      .abstract      = true,
1416      .class_size    = sizeof(XiveRouterClass),
1417      .class_init    = xive_router_class_init,
1418      .interfaces    = (InterfaceInfo[]) {
1419          { TYPE_XIVE_NOTIFIER },
1420          { }
1421      }
1422  };
1423  
1424  void xive_eas_pic_print_info(XiveEAS *eas, uint32_t lisn, Monitor *mon)
1425  {
1426      if (!xive_eas_is_valid(eas)) {
1427          return;
1428      }
1429  
1430      monitor_printf(mon, "  %08x %s end:%02x/%04x data:%08x\n",
1431                     lisn, xive_eas_is_masked(eas) ? "M" : " ",
1432                     (uint8_t)  xive_get_field64(EAS_END_BLOCK, eas->w),
1433                     (uint32_t) xive_get_field64(EAS_END_INDEX, eas->w),
1434                     (uint32_t) xive_get_field64(EAS_END_DATA, eas->w));
1435  }
1436  
1437  /*
1438   * END ESB MMIO loads
1439   */
1440  static uint64_t xive_end_source_read(void *opaque, hwaddr addr, unsigned size)
1441  {
1442      XiveENDSource *xsrc = XIVE_END_SOURCE(opaque);
1443      uint32_t offset = addr & 0xFFF;
1444      uint8_t end_blk;
1445      uint32_t end_idx;
1446      XiveEND end;
1447      uint32_t end_esmask;
1448      uint8_t pq;
1449      uint64_t ret = -1;
1450  
1451      end_blk = xsrc->block_id;
1452      end_idx = addr >> (xsrc->esb_shift + 1);
1453  
1454      if (xive_router_get_end(xsrc->xrtr, end_blk, end_idx, &end)) {
1455          qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
1456                        end_idx);
1457          return -1;
1458      }
1459  
1460      if (!xive_end_is_valid(&end)) {
1461          qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
1462                        end_blk, end_idx);
1463          return -1;
1464      }
1465  
1466      end_esmask = addr_is_even(addr, xsrc->esb_shift) ? END_W1_ESn : END_W1_ESe;
1467      pq = xive_get_field32(end_esmask, end.w1);
1468  
1469      switch (offset) {
1470      case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF:
1471          ret = xive_esb_eoi(&pq);
1472  
1473          /* Forward the source event notification for routing ?? */
1474          break;
1475  
1476      case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF:
1477          ret = pq;
1478          break;
1479  
1480      case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
1481      case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
1482      case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
1483      case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
1484          ret = xive_esb_set(&pq, (offset >> 8) & 0x3);
1485          break;
1486      default:
1487          qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid END ESB load addr %x\n",
1488                        offset);
1489          return -1;
1490      }
1491  
1492      if (pq != xive_get_field32(end_esmask, end.w1)) {
1493          end.w1 = xive_set_field32(end_esmask, end.w1, pq);
1494          xive_router_write_end(xsrc->xrtr, end_blk, end_idx, &end, 1);
1495      }
1496  
1497      return ret;
1498  }
1499  
1500  /*
1501   * END ESB MMIO stores are invalid
1502   */
1503  static void xive_end_source_write(void *opaque, hwaddr addr,
1504                                    uint64_t value, unsigned size)
1505  {
1506      qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB write addr 0x%"
1507                    HWADDR_PRIx"\n", addr);
1508  }
1509  
1510  static const MemoryRegionOps xive_end_source_ops = {
1511      .read = xive_end_source_read,
1512      .write = xive_end_source_write,
1513      .endianness = DEVICE_BIG_ENDIAN,
1514      .valid = {
1515          .min_access_size = 8,
1516          .max_access_size = 8,
1517      },
1518      .impl = {
1519          .min_access_size = 8,
1520          .max_access_size = 8,
1521      },
1522  };
1523  
1524  static void xive_end_source_realize(DeviceState *dev, Error **errp)
1525  {
1526      XiveENDSource *xsrc = XIVE_END_SOURCE(dev);
1527      Object *obj;
1528      Error *local_err = NULL;
1529  
1530      obj = object_property_get_link(OBJECT(dev), "xive", &local_err);
1531      if (!obj) {
1532          error_propagate(errp, local_err);
1533          error_prepend(errp, "required link 'xive' not found: ");
1534          return;
1535      }
1536  
1537      xsrc->xrtr = XIVE_ROUTER(obj);
1538  
1539      if (!xsrc->nr_ends) {
1540          error_setg(errp, "Number of interrupts needs to be greater than 0");
1541          return;
1542      }
1543  
1544      if (xsrc->esb_shift != XIVE_ESB_4K &&
1545          xsrc->esb_shift != XIVE_ESB_64K) {
1546          error_setg(errp, "Invalid ESB shift setting");
1547          return;
1548      }
1549  
1550      /*
1551       * Each END is assigned an even/odd pair of MMIO pages, the even page
1552       * manages the ESn field while the odd page manages the ESe field.
1553       */
1554      memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc),
1555                            &xive_end_source_ops, xsrc, "xive.end",
1556                            (1ull << (xsrc->esb_shift + 1)) * xsrc->nr_ends);
1557  }
1558  
1559  static Property xive_end_source_properties[] = {
1560      DEFINE_PROP_UINT8("block-id", XiveENDSource, block_id, 0),
1561      DEFINE_PROP_UINT32("nr-ends", XiveENDSource, nr_ends, 0),
1562      DEFINE_PROP_UINT32("shift", XiveENDSource, esb_shift, XIVE_ESB_64K),
1563      DEFINE_PROP_END_OF_LIST(),
1564  };
1565  
1566  static void xive_end_source_class_init(ObjectClass *klass, void *data)
1567  {
1568      DeviceClass *dc = DEVICE_CLASS(klass);
1569  
1570      dc->desc    = "XIVE END Source";
1571      dc->props   = xive_end_source_properties;
1572      dc->realize = xive_end_source_realize;
1573  }
1574  
1575  static const TypeInfo xive_end_source_info = {
1576      .name          = TYPE_XIVE_END_SOURCE,
1577      .parent        = TYPE_DEVICE,
1578      .instance_size = sizeof(XiveENDSource),
1579      .class_init    = xive_end_source_class_init,
1580  };
1581  
1582  /*
1583   * XIVE Notifier
1584   */
1585  static const TypeInfo xive_notifier_info = {
1586      .name = TYPE_XIVE_NOTIFIER,
1587      .parent = TYPE_INTERFACE,
1588      .class_size = sizeof(XiveNotifierClass),
1589  };
1590  
1591  static void xive_register_types(void)
1592  {
1593      type_register_static(&xive_source_info);
1594      type_register_static(&xive_notifier_info);
1595      type_register_static(&xive_router_info);
1596      type_register_static(&xive_end_source_info);
1597      type_register_static(&xive_tctx_info);
1598  }
1599  
1600  type_init(xive_register_types)
1601