/*
 * QEMU PowerPC sPAPR XIVE interrupt controller model
 *
 * Copyright (c) 2017-2019, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/kvm.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_cpu_core.h"
#include "hw/ppc/spapr_xive.h"
#include "hw/ppc/xive.h"
#include "kvm_ppc.h"

#include <sys/ioctl.h>

/*
 * Helpers for CPU hotplug
 *
 * TODO: make a common KVMEnabledCPU layer for XICS and XIVE
 */
typedef struct KVMEnabledCPU {
    unsigned long vcpu_id;
    QLIST_ENTRY(KVMEnabledCPU) node;
} KVMEnabledCPU;

static QLIST_HEAD(, KVMEnabledCPU)
    kvm_enabled_cpus = QLIST_HEAD_INITIALIZER(&kvm_enabled_cpus);

static bool kvm_cpu_is_enabled(CPUState *cs)
{
    KVMEnabledCPU *enabled_cpu;
    unsigned long vcpu_id = kvm_arch_vcpu_id(cs);

    QLIST_FOREACH(enabled_cpu, &kvm_enabled_cpus, node) {
        if (enabled_cpu->vcpu_id == vcpu_id) {
            return true;
        }
    }
    return false;
}

static void kvm_cpu_enable(CPUState *cs)
{
    KVMEnabledCPU *enabled_cpu;
    unsigned long vcpu_id = kvm_arch_vcpu_id(cs);

    enabled_cpu = g_malloc(sizeof(*enabled_cpu));
    enabled_cpu->vcpu_id = vcpu_id;
    QLIST_INSERT_HEAD(&kvm_enabled_cpus, enabled_cpu, node);
}

/*
 * XIVE Thread Interrupt Management context (KVM)
 */

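/*
 * The OS ring of the thread interrupt context is transferred to and
 * from KVM through the KVM_REG_PPC_VP_STATE one_reg, as a pair of
 * 64-bit words. Only state[0], holding word0 and word1 of the OS
 * ring, is interpreted on the QEMU side.
 */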
static void kvmppc_xive_cpu_set_state(XiveTCTX *tctx, Error **errp)
{
    uint64_t state[2];
    int ret;

    /* word0 and word1 of the OS ring. */
    state[0] = *((uint64_t *) &tctx->regs[TM_QW1_OS]);

    ret = kvm_set_one_reg(tctx->cs, KVM_REG_PPC_VP_STATE, state);
    if (ret != 0) {
        error_setg_errno(errp, errno,
                         "XIVE: could not restore KVM state of CPU %ld",
                         kvm_arch_vcpu_id(tctx->cs));
    }
}

void kvmppc_xive_cpu_get_state(XiveTCTX *tctx, Error **errp)
{
    uint64_t state[2] = { 0 };
    int ret;

    ret = kvm_get_one_reg(tctx->cs, KVM_REG_PPC_VP_STATE, state);
    if (ret != 0) {
        error_setg_errno(errp, errno,
                         "XIVE: could not capture KVM state of CPU %ld",
                         kvm_arch_vcpu_id(tctx->cs));
        return;
    }

    /* word0 and word1 of the OS ring. */
    *((uint64_t *) &tctx->regs[TM_QW1_OS]) = state[0];
}

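/*
 * The one_reg ioctls target a specific vCPU, so the state capture is
 * run on that vCPU with run_on_cpu(). This structure carries the
 * thread context pointer in and any error back out.
 */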
typedef struct {
    XiveTCTX *tctx;
    Error *err;
} XiveCpuGetState;

static void kvmppc_xive_cpu_do_synchronize_state(CPUState *cpu,
                                                 run_on_cpu_data arg)
{
    XiveCpuGetState *s = arg.host_ptr;

    kvmppc_xive_cpu_get_state(s->tctx, &s->err);
}

void kvmppc_xive_cpu_synchronize_state(XiveTCTX *tctx, Error **errp)
{
    XiveCpuGetState s = {
        .tctx = tctx,
        .err = NULL,
    };

    /*
     * Kick the vCPU to make sure it is available for the KVM ioctl.
     */
    run_on_cpu(tctx->cs, kvmppc_xive_cpu_do_synchronize_state,
               RUN_ON_CPU_HOST_PTR(&s));

    if (s.err) {
        error_propagate(errp, s.err);
        return;
    }
}

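/*
 * Connect the presenter of a vCPU to the KVM XIVE device: the
 * KVM_CAP_PPC_IRQ_XIVE capability is enabled on the vCPU with the
 * device fd and the vCPU id as arguments. This is done only once per
 * vCPU, even across hot unplug/replug.
 */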
void kvmppc_xive_cpu_connect(XiveTCTX *tctx, Error **errp)
{
    SpaprXive *xive = SPAPR_MACHINE(qdev_get_machine())->xive;
    unsigned long vcpu_id;
    int ret;

    /* Check if CPU was hot unplugged and replugged. */
    if (kvm_cpu_is_enabled(tctx->cs)) {
        return;
    }

    vcpu_id = kvm_arch_vcpu_id(tctx->cs);

    ret = kvm_vcpu_enable_cap(tctx->cs, KVM_CAP_PPC_IRQ_XIVE, 0, xive->fd,
                              vcpu_id, 0);
    if (ret < 0) {
        error_setg(errp, "XIVE: unable to connect CPU%ld to KVM device: %s",
                   vcpu_id, strerror(errno));
        return;
    }

    kvm_cpu_enable(tctx->cs);
}

/*
 * XIVE Interrupt Source (KVM)
 */

void kvmppc_xive_set_source_config(SpaprXive *xive, uint32_t lisn, XiveEAS *eas,
                                   Error **errp)
{
    uint32_t end_idx;
    uint32_t end_blk;
    uint8_t priority;
    uint32_t server;
    bool masked;
    uint32_t eisn;
    uint64_t kvm_src;
    Error *local_err = NULL;

    assert(xive_eas_is_valid(eas));

    end_idx = xive_get_field64(EAS_END_INDEX, eas->w);
    end_blk = xive_get_field64(EAS_END_BLOCK, eas->w);
    eisn = xive_get_field64(EAS_END_DATA, eas->w);
    masked = xive_eas_is_masked(eas);

    spapr_xive_end_to_target(end_blk, end_idx, &server, &priority);

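    /*
     * Pack the routing information in a single 64-bit attribute
     * value, using the KVM_XIVE_SOURCE_* shifts and masks from the
     * Linux UAPI headers: target priority and server (vCPU id), the
     * masked state and the EISN presented to the guest.
     */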
    kvm_src = priority << KVM_XIVE_SOURCE_PRIORITY_SHIFT &
        KVM_XIVE_SOURCE_PRIORITY_MASK;
    kvm_src |= server << KVM_XIVE_SOURCE_SERVER_SHIFT &
        KVM_XIVE_SOURCE_SERVER_MASK;
    kvm_src |= ((uint64_t) masked << KVM_XIVE_SOURCE_MASKED_SHIFT) &
        KVM_XIVE_SOURCE_MASKED_MASK;
    kvm_src |= ((uint64_t)eisn << KVM_XIVE_SOURCE_EISN_SHIFT) &
        KVM_XIVE_SOURCE_EISN_MASK;

    kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_SOURCE_CONFIG, lisn,
                      &kvm_src, true, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
}

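/*
 * Ask KVM to synchronize a source, so that any in-flight event
 * notification for this LISN is flushed to its event queue before
 * the operation completes.
 */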
void kvmppc_xive_sync_source(SpaprXive *xive, uint32_t lisn, Error **errp)
{
    kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_SOURCE_SYNC, lisn,
                      NULL, true, errp);
}

/*
 * At reset, the interrupt sources are simply created and MASKED. We
 * only need to inform the KVM XIVE device about their type: LSI or
 * MSI.
 */
void kvmppc_xive_source_reset_one(XiveSource *xsrc, int srcno, Error **errp)
{
    SpaprXive *xive = SPAPR_XIVE(xsrc->xive);
    uint64_t state = 0;

    if (xive_source_irq_is_lsi(xsrc, srcno)) {
        state |= KVM_XIVE_LEVEL_SENSITIVE;
        if (xsrc->status[srcno] & XIVE_STATUS_ASSERTED) {
            state |= KVM_XIVE_LEVEL_ASSERTED;
        }
    }

    kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_SOURCE, srcno, &state,
                      true, errp);
}

void kvmppc_xive_source_reset(XiveSource *xsrc, Error **errp)
{
    int i;

    for (i = 0; i < xsrc->nr_irqs; i++) {
        Error *local_err = NULL;

        kvmppc_xive_source_reset_one(xsrc, i, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }
}

/*
 * This is used to perform the magic loads on the ESB pages, described
 * in xive.h.
 *
 * Memory barriers should not be needed for loads (no store for now).
 */
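/*
 * A load at the XIVE_ESB_GET offset returns the current PQ bits
 * without side effect, while a load at one of the XIVE_ESB_SET_PQ_xx
 * offsets returns the previous PQ bits and installs the new value.
 * This is what the VM change state handler below relies on to save
 * and restore the source states.
 */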
static uint64_t xive_esb_rw(XiveSource *xsrc, int srcno, uint32_t offset,
                            uint64_t data, bool write)
{
    uint64_t *addr = xsrc->esb_mmap + xive_source_esb_mgmt(xsrc, srcno) +
        offset;

    if (write) {
        *addr = cpu_to_be64(data);
        return -1;
    } else {
        /* Prevent the compiler from optimizing away the load */
        volatile uint64_t value = be64_to_cpu(*addr);
        return value;
    }
}

static uint8_t xive_esb_read(XiveSource *xsrc, int srcno, uint32_t offset)
{
    return xive_esb_rw(xsrc, srcno, offset, 0, 0) & 0x3;
}

static void xive_esb_trigger(XiveSource *xsrc, int srcno)
{
    uint64_t *addr = xsrc->esb_mmap + xive_source_esb_page(xsrc, srcno);

    *addr = 0x0;
}

uint64_t kvmppc_xive_esb_rw(XiveSource *xsrc, int srcno, uint32_t offset,
                            uint64_t data, bool write)
{
    if (write) {
        return xive_esb_rw(xsrc, srcno, offset, data, 1);
    }

    /*
     * Special Load EOI handling for LSI sources. Q bit is never set
     * and the interrupt should be re-triggered if the level is still
     * asserted.
     */
    if (xive_source_irq_is_lsi(xsrc, srcno) &&
        offset == XIVE_ESB_LOAD_EOI) {
        xive_esb_read(xsrc, srcno, XIVE_ESB_SET_PQ_00);
        if (xsrc->status[srcno] & XIVE_STATUS_ASSERTED) {
            xive_esb_trigger(xsrc, srcno);
        }
        return 0;
    } else {
        return xive_esb_rw(xsrc, srcno, offset, 0, 0);
    }
}

static void kvmppc_xive_source_get_state(XiveSource *xsrc)
{
    int i;

    for (i = 0; i < xsrc->nr_irqs; i++) {
        /* Perform a load without side effect to retrieve the PQ bits */
        uint8_t pq = xive_esb_read(xsrc, i, XIVE_ESB_GET);

        /* and save PQ locally */
        xive_source_esb_set(xsrc, i, pq);
    }
}

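/*
 * qemu_irq handler for the sources when KVM is in use. MSIs are
 * simply triggered on a rising edge, while LSIs also track their
 * assertion level in xsrc->status[] so that the ESB state can be
 * saved and restored across a VM stop or a migration.
 */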
void kvmppc_xive_source_set_irq(void *opaque, int srcno, int val)
{
    XiveSource *xsrc = opaque;
    struct kvm_irq_level args;
    int rc;

    args.irq = srcno;
    if (!xive_source_irq_is_lsi(xsrc, srcno)) {
        if (!val) {
            return;
        }
        args.level = KVM_INTERRUPT_SET;
    } else {
        if (val) {
            xsrc->status[srcno] |= XIVE_STATUS_ASSERTED;
            args.level = KVM_INTERRUPT_SET_LEVEL;
        } else {
            xsrc->status[srcno] &= ~XIVE_STATUS_ASSERTED;
            args.level = KVM_INTERRUPT_UNSET;
        }
    }
    rc = kvm_vm_ioctl(kvm_state, KVM_IRQ_LINE, &args);
    if (rc < 0) {
        error_report("XIVE: kvm_irq_line() failed : %s", strerror(errno));
    }
}

/*
 * sPAPR XIVE interrupt controller (KVM)
 */
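/*
 * The KVM_DEV_XIVE_GRP_EQ_CONFIG group transfers the event queue
 * (END) configuration of a (server, priority) target as a struct
 * kvm_ppc_xive_eq: flags, queue size and address, plus the toggle
 * bit and queue index which are updated by HW.
 */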
void kvmppc_xive_get_queue_config(SpaprXive *xive, uint8_t end_blk,
                                  uint32_t end_idx, XiveEND *end,
                                  Error **errp)
{
    struct kvm_ppc_xive_eq kvm_eq = { 0 };
    uint64_t kvm_eq_idx;
    uint8_t priority;
    uint32_t server;
    Error *local_err = NULL;

    assert(xive_end_is_valid(end));

    /* Encode the tuple (server, prio) as a KVM EQ index */
    spapr_xive_end_to_target(end_blk, end_idx, &server, &priority);

    kvm_eq_idx = priority << KVM_XIVE_EQ_PRIORITY_SHIFT &
            KVM_XIVE_EQ_PRIORITY_MASK;
    kvm_eq_idx |= server << KVM_XIVE_EQ_SERVER_SHIFT &
        KVM_XIVE_EQ_SERVER_MASK;

    kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_EQ_CONFIG, kvm_eq_idx,
                      &kvm_eq, false, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /*
     * The EQ index and toggle bit are updated by HW. These are the
     * only fields from KVM we want to update QEMU with. The other END
     * fields should already be in the QEMU END table.
     */
    end->w1 = xive_set_field32(END_W1_GENERATION, 0ul, kvm_eq.qtoggle) |
        xive_set_field32(END_W1_PAGE_OFF, 0ul, kvm_eq.qindex);
}

void kvmppc_xive_set_queue_config(SpaprXive *xive, uint8_t end_blk,
                                  uint32_t end_idx, XiveEND *end,
                                  Error **errp)
{
    struct kvm_ppc_xive_eq kvm_eq = { 0 };
    uint64_t kvm_eq_idx;
    uint8_t priority;
    uint32_t server;
    Error *local_err = NULL;

    /*
     * Build the KVM state from the local END structure.
     */

    kvm_eq.flags = 0;
    if (xive_get_field32(END_W0_UCOND_NOTIFY, end->w0)) {
        kvm_eq.flags |= KVM_XIVE_EQ_ALWAYS_NOTIFY;
    }

    /*
     * If the hcall is disabling the EQ, set the size and page address
     * to zero. When migrating, only valid ENDs are taken into
     * account.
     */
    if (xive_end_is_valid(end)) {
        kvm_eq.qshift = xive_get_field32(END_W0_QSIZE, end->w0) + 12;
        kvm_eq.qaddr  = xive_end_qaddr(end);
        /*
         * The EQ toggle bit and index should only be relevant when
         * restoring the EQ state
         */
        kvm_eq.qtoggle = xive_get_field32(END_W1_GENERATION, end->w1);
        kvm_eq.qindex  = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    } else {
        kvm_eq.qshift = 0;
        kvm_eq.qaddr  = 0;
    }

    /* Encode the tuple (server, prio) as a KVM EQ index */
    spapr_xive_end_to_target(end_blk, end_idx, &server, &priority);

    kvm_eq_idx = priority << KVM_XIVE_EQ_PRIORITY_SHIFT &
            KVM_XIVE_EQ_PRIORITY_MASK;
    kvm_eq_idx |= server << KVM_XIVE_EQ_SERVER_SHIFT &
        KVM_XIVE_EQ_SERVER_MASK;

    kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_EQ_CONFIG, kvm_eq_idx,
                      &kvm_eq, true, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
}

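/*
 * The KVM_DEV_XIVE_RESET control asks the in-kernel device to reset
 * the interrupt controller state it maintains for the guest.
 */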
void kvmppc_xive_reset(SpaprXive *xive, Error **errp)
{
    kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_CTRL, KVM_DEV_XIVE_RESET,
                      NULL, true, errp);
}

static void kvmppc_xive_get_queues(SpaprXive *xive, Error **errp)
{
    Error *local_err = NULL;
    int i;

    for (i = 0; i < xive->nr_ends; i++) {
        if (!xive_end_is_valid(&xive->endt[i])) {
            continue;
        }

        kvmppc_xive_get_queue_config(xive, SPAPR_XIVE_BLOCK_ID, i,
                                     &xive->endt[i], &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }
}

/*
 * The primary goal of the XIVE VM change handler is to mark the EQ
 * pages dirty when all XIVE event notifications have stopped.
 *
 * Whenever the VM is stopped, the VM change handler sets the source
 * PQs to PENDING to stop the flow of events and to possibly catch a
 * triggered interrupt occurring while the VM is stopped. The previous
 * state is saved in anticipation of a migration. The XIVE controller
 * is then synced through KVM to flush any in-flight event
 * notification and stabilize the EQs.
 *
 * At this stage, we can mark the EQ page dirty and let a migration
 * sequence transfer the EQ pages to the destination, which is done
 * just after the stop state.
 *
 * The previous configuration of the sources is restored when the VM
 * runs again. If an interrupt was queued while the VM was stopped,
 * simply generate a trigger.
 */
static void kvmppc_xive_change_state_handler(void *opaque, int running,
                                             RunState state)
{
    SpaprXive *xive = opaque;
    XiveSource *xsrc = &xive->source;
    Error *local_err = NULL;
    int i;

    /*
     * Restore the sources to their initial state. This is called when
     * the VM resumes after a stop or a migration.
     */
    if (running) {
        for (i = 0; i < xsrc->nr_irqs; i++) {
            uint8_t pq = xive_source_esb_get(xsrc, i);
            uint8_t old_pq;

            old_pq = xive_esb_read(xsrc, i, XIVE_ESB_SET_PQ_00 + (pq << 8));

            /*
             * An interrupt was queued while the VM was stopped,
             * generate a trigger.
             */
            if (pq == XIVE_ESB_RESET && old_pq == XIVE_ESB_QUEUED) {
                xive_esb_trigger(xsrc, i);
            }
        }

        return;
    }

    /*
     * Mask the sources, to stop the flow of event notifications, and
     * save the PQs locally in the XiveSource object. The XiveSource
     * state will be collected later on by its vmstate handler if a
     * migration is in progress.
     */
    for (i = 0; i < xsrc->nr_irqs; i++) {
        uint8_t pq = xive_esb_read(xsrc, i, XIVE_ESB_GET);

        /*
         * PQ is set to PENDING to possibly catch a triggered
         * interrupt occurring while the VM is stopped (hotplug event
         * for instance).
         */
        if (pq != XIVE_ESB_OFF) {
            pq = xive_esb_read(xsrc, i, XIVE_ESB_SET_PQ_10);
        }
        xive_source_esb_set(xsrc, i, pq);
    }

    /*
     * Sync the XIVE controller in KVM, to flush in-flight event
     * notifications that should be enqueued in the EQs and mark the
     * XIVE EQ pages dirty to collect all updates.
     */
    kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_CTRL,
                      KVM_DEV_XIVE_EQ_SYNC, NULL, true, &local_err);
    if (local_err) {
        error_report_err(local_err);
        return;
    }
}

void kvmppc_xive_synchronize_state(SpaprXive *xive, Error **errp)
{
    /*
     * When the VM is stopped, the sources are masked and the previous
     * state is saved in anticipation of a migration. We should not
     * synchronize the source state in that case, else we would overwrite
     * the saved state.
     */
    if (runstate_is_running()) {
        kvmppc_xive_source_get_state(&xive->source);
    }

    /* EAT: there is no extra state to query from KVM */

    /* ENDT */
    kvmppc_xive_get_queues(xive, errp);
}

/*
 * The SpaprXive 'pre_save' method is called by the vmstate handler of
 * the SpaprXive model, after the XIVE controller is synced in the VM
 * change handler.
 */
int kvmppc_xive_pre_save(SpaprXive *xive)
{
    Error *local_err = NULL;

    /* EAT: there is no extra state to query from KVM */

    /* ENDT */
    kvmppc_xive_get_queues(xive, &local_err);
    if (local_err) {
        error_report_err(local_err);
        return -1;
    }

    return 0;
}

/*
 * The SpaprXive 'post_load' method is not called by a vmstate
 * handler. It is called at the sPAPR machine level at the end of the
 * migration sequence by the sPAPR IRQ backend 'post_load' method,
 * when all XIVE states have been transferred and loaded.
 */
int kvmppc_xive_post_load(SpaprXive *xive, int version_id)
{
    Error *local_err = NULL;
    CPUState *cs;
    int i;

    /* Restore the ENDT first. The targeting depends on it. */
    for (i = 0; i < xive->nr_ends; i++) {
        if (!xive_end_is_valid(&xive->endt[i])) {
            continue;
        }

        kvmppc_xive_set_queue_config(xive, SPAPR_XIVE_BLOCK_ID, i,
                                     &xive->endt[i], &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -1;
        }
    }

    /* Restore the EAT */
    for (i = 0; i < xive->nr_irqs; i++) {
        if (!xive_eas_is_valid(&xive->eat[i])) {
            continue;
        }

        kvmppc_xive_set_source_config(xive, i, &xive->eat[i], &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -1;
        }
    }

    /* Restore the thread interrupt contexts */
    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        kvmppc_xive_cpu_set_state(spapr_cpu_state(cpu)->tctx, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -1;
        }
    }

    /* The source states will be restored when the machine starts running */
    return 0;
}

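/*
 * The KVM XIVE device exposes the ESB and TIMA pages through mmap()
 * on its file descriptor, at fixed page offsets
 * (KVM_XIVE_ESB_PAGE_OFFSET, KVM_XIVE_TIMA_PAGE_OFFSET). The offset
 * is expressed in host pages, hence the page_shift below.
 */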
static void *kvmppc_xive_mmap(SpaprXive *xive, int pgoff, size_t len,
                              Error **errp)
{
    void *addr;
    uint32_t page_shift = 16; /* TODO: fix page_shift */

    addr = mmap(NULL, len, PROT_WRITE | PROT_READ, MAP_SHARED, xive->fd,
                pgoff << page_shift);
    if (addr == MAP_FAILED) {
        error_setg_errno(errp, errno, "XIVE: unable to set memory mapping");
        return NULL;
    }

    return addr;
}

/*
 * All the XIVE memory regions are now backed by mappings from the KVM
 * XIVE device.
 */
void kvmppc_xive_connect(SpaprXive *xive, Error **errp)
{
    XiveSource *xsrc = &xive->source;
    XiveENDSource *end_xsrc = &xive->end_source;
    Error *local_err = NULL;
    size_t esb_len = (1ull << xsrc->esb_shift) * xsrc->nr_irqs;
    size_t tima_len = 4ull << TM_SHIFT;

    if (!kvmppc_has_cap_xive()) {
        error_setg(errp, "IRQ_XIVE capability must be present for KVM");
        return;
    }

    /* First, create the KVM XIVE device */
    xive->fd = kvm_create_device(kvm_state, KVM_DEV_TYPE_XIVE, false);
    if (xive->fd < 0) {
        error_setg_errno(errp, -xive->fd, "XIVE: error creating KVM device");
        return;
    }

    /*
     * 1. Source ESB pages - KVM mapping
     */
    xsrc->esb_mmap = kvmppc_xive_mmap(xive, KVM_XIVE_ESB_PAGE_OFFSET, esb_len,
                                      &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    memory_region_init_ram_device_ptr(&xsrc->esb_mmio, OBJECT(xsrc),
                                      "xive.esb", esb_len, xsrc->esb_mmap);
    sysbus_init_mmio(SYS_BUS_DEVICE(xive), &xsrc->esb_mmio);

    /*
     * 2. END ESB pages (No KVM support yet)
     */
    sysbus_init_mmio(SYS_BUS_DEVICE(xive), &end_xsrc->esb_mmio);

    /*
     * 3. TIMA pages - KVM mapping
     */
    xive->tm_mmap = kvmppc_xive_mmap(xive, KVM_XIVE_TIMA_PAGE_OFFSET, tima_len,
                                     &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    memory_region_init_ram_device_ptr(&xive->tm_mmio, OBJECT(xive),
                                      "xive.tima", tima_len, xive->tm_mmap);
    sysbus_init_mmio(SYS_BUS_DEVICE(xive), &xive->tm_mmio);

    xive->change = qemu_add_vm_change_state_handler(
        kvmppc_xive_change_state_handler, xive);

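    /*
     * Let the generic KVM code know that an in-kernel irqchip is in
     * use: MSIs can then be routed through irqfds and the guest IRQ
     * numbers map directly to the KVM GSI space.
     */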
    kvm_kernel_irqchip = true;
    kvm_msi_via_irqfd_allowed = true;
    kvm_gsi_direct_mapping = true;

    /* Map all regions */
    spapr_xive_map_mmio(xive);
}