/*
 * QEMU PowerPC sPAPR XIVE interrupt controller model
 *
 * Copyright (c) 2017-2019, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/kvm.h"
#include "sysemu/runstate.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_cpu_core.h"
#include "hw/ppc/spapr_xive.h"
#include "hw/ppc/xive.h"
#include "kvm_ppc.h"

#include <sys/ioctl.h>

/*
 * Helpers for CPU hotplug
 *
 * TODO: make a common KVMEnabledCPU layer for XICS and XIVE
 */
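/*
 * The list below tracks the vCPUs that have already been connected
 * to the KVM XIVE device, so that a CPU which is hot unplugged and
 * then replugged is not connected a second time.
 */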
typedef struct KVMEnabledCPU {
    unsigned long vcpu_id;
    QLIST_ENTRY(KVMEnabledCPU) node;
} KVMEnabledCPU;

static QLIST_HEAD(, KVMEnabledCPU)
    kvm_enabled_cpus = QLIST_HEAD_INITIALIZER(&kvm_enabled_cpus);

static bool kvm_cpu_is_enabled(CPUState *cs)
{
    KVMEnabledCPU *enabled_cpu;
    unsigned long vcpu_id = kvm_arch_vcpu_id(cs);

    QLIST_FOREACH(enabled_cpu, &kvm_enabled_cpus, node) {
        if (enabled_cpu->vcpu_id == vcpu_id) {
            return true;
        }
    }
    return false;
}

static void kvm_cpu_enable(CPUState *cs)
{
    KVMEnabledCPU *enabled_cpu;
    unsigned long vcpu_id = kvm_arch_vcpu_id(cs);

    enabled_cpu = g_malloc(sizeof(*enabled_cpu));
    enabled_cpu->vcpu_id = vcpu_id;
    QLIST_INSERT_HEAD(&kvm_enabled_cpus, enabled_cpu, node);
}

static void kvm_cpu_disable_all(void)
{
    KVMEnabledCPU *enabled_cpu, *next;

    QLIST_FOREACH_SAFE(enabled_cpu, &kvm_enabled_cpus, node, next) {
        QLIST_REMOVE(enabled_cpu, node);
        g_free(enabled_cpu);
    }
}

/*
 * XIVE Thread Interrupt Management context (KVM)
 */

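/*
 * The KVM_REG_PPC_VP_STATE one_reg covers the OS ring state of the
 * TIMA. QEMU only uses its first 64-bit word, which corresponds to
 * word0 and word1 of the OS ring (NSR, CPPR, IPB, PIPR registers).
 */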
int kvmppc_xive_cpu_set_state(XiveTCTX *tctx, Error **errp)
{
    SpaprXive *xive = SPAPR_XIVE(tctx->xptr);
    uint64_t state[2] = { 0 }; /* don't pass uninitialized stack to KVM */
    int ret;

    assert(xive->fd != -1);

    /* word0 and word1 of the OS ring. */
    state[0] = *((uint64_t *) &tctx->regs[TM_QW1_OS]);

    ret = kvm_set_one_reg(tctx->cs, KVM_REG_PPC_VP_STATE, state);
    if (ret != 0) {
        error_setg_errno(errp, -ret,
                         "XIVE: could not restore KVM state of CPU %ld",
                         kvm_arch_vcpu_id(tctx->cs));
        return ret;
    }

    return 0;
}

int kvmppc_xive_cpu_get_state(XiveTCTX *tctx, Error **errp)
{
    SpaprXive *xive = SPAPR_XIVE(tctx->xptr);
    uint64_t state[2] = { 0 };
    int ret;

    assert(xive->fd != -1);

    ret = kvm_get_one_reg(tctx->cs, KVM_REG_PPC_VP_STATE, state);
    if (ret != 0) {
        error_setg_errno(errp, -ret,
                         "XIVE: could not capture KVM state of CPU %ld",
                         kvm_arch_vcpu_id(tctx->cs));
        return ret;
    }

    /* word0 and word1 of the OS ring. */
    *((uint64_t *) &tctx->regs[TM_QW1_OS]) = state[0];

    return 0;
}

typedef struct {
    XiveTCTX *tctx;
    Error *err;
} XiveCpuGetState;

static void kvmppc_xive_cpu_do_synchronize_state(CPUState *cpu,
                                                 run_on_cpu_data arg)
{
    XiveCpuGetState *s = arg.host_ptr;

    kvmppc_xive_cpu_get_state(s->tctx, &s->err);
}

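/*
 * KVM vCPU ioctls must be issued from the vCPU thread itself, hence
 * the run_on_cpu() indirection below. This is typically used before
 * dumping the interrupt controller state, e.g. with 'info pic'.
 */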
void kvmppc_xive_cpu_synchronize_state(XiveTCTX *tctx, Error **errp)
{
    XiveCpuGetState s = {
        .tctx = tctx,
        .err = NULL,
    };

    /*
     * Kick the vCPU to make sure it is available for the KVM ioctl.
     */
    run_on_cpu(tctx->cs, kvmppc_xive_cpu_do_synchronize_state,
               RUN_ON_CPU_HOST_PTR(&s));

    if (s.err) {
        error_propagate(errp, s.err);
        return;
    }
}

int kvmppc_xive_cpu_connect(XiveTCTX *tctx, Error **errp)
{
    ERRP_GUARD();
    SpaprXive *xive = SPAPR_XIVE(tctx->xptr);
    unsigned long vcpu_id;
    int ret;

    assert(xive->fd != -1);

    /* Check if CPU was hot unplugged and replugged. */
    if (kvm_cpu_is_enabled(tctx->cs)) {
        return 0;
    }

    vcpu_id = kvm_arch_vcpu_id(tctx->cs);

    /*
     * Connect the vCPU presenter to the KVM device. -ENOSPC is
     * returned when the vCPU id exceeds the number of servers the
     * device supports, hence the hint below.
     */
    ret = kvm_vcpu_enable_cap(tctx->cs, KVM_CAP_PPC_IRQ_XIVE, 0, xive->fd,
                              vcpu_id, 0);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "XIVE: unable to connect CPU%ld to KVM device",
                         vcpu_id);
        if (ret == -ENOSPC) {
            error_append_hint(errp, "Try -smp maxcpus=N with N < %u\n",
                              MACHINE(qdev_get_machine())->smp.max_cpus);
        }
        return ret;
    }

    kvm_cpu_enable(tctx->cs);
    return 0;
}

/*
 * XIVE Interrupt Source (KVM)
 */

void kvmppc_xive_set_source_config(SpaprXive *xive, uint32_t lisn, XiveEAS *eas,
                                   Error **errp)
{
    uint32_t end_idx;
    uint32_t end_blk;
    uint8_t priority;
    uint32_t server;
    bool masked;
    uint32_t eisn;
    uint64_t kvm_src;
    Error *local_err = NULL;

    assert(xive_eas_is_valid(eas));

    end_idx = xive_get_field64(EAS_END_INDEX, eas->w);
    end_blk = xive_get_field64(EAS_END_BLOCK, eas->w);
    eisn = xive_get_field64(EAS_END_DATA, eas->w);
    masked = xive_eas_is_masked(eas);

    spapr_xive_end_to_target(end_blk, end_idx, &server, &priority);

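    /*
     * Pack the (priority, server, masked, EISN) tuple into the 64-bit
     * source configuration word expected by the KVM device, using the
     * KVM_XIVE_SOURCE_* shift/mask definitions.
     */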
    kvm_src = priority << KVM_XIVE_SOURCE_PRIORITY_SHIFT &
        KVM_XIVE_SOURCE_PRIORITY_MASK;
    kvm_src |= server << KVM_XIVE_SOURCE_SERVER_SHIFT &
        KVM_XIVE_SOURCE_SERVER_MASK;
    kvm_src |= ((uint64_t) masked << KVM_XIVE_SOURCE_MASKED_SHIFT) &
        KVM_XIVE_SOURCE_MASKED_MASK;
    kvm_src |= ((uint64_t) eisn << KVM_XIVE_SOURCE_EISN_SHIFT) &
        KVM_XIVE_SOURCE_EISN_MASK;

    kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_SOURCE_CONFIG, lisn,
                      &kvm_src, true, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
}

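/*
 * Ask KVM to synchronize the source: this flushes any in-flight
 * event notification so that its state settles in the ESBs and EQs.
 */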
void kvmppc_xive_sync_source(SpaprXive *xive, uint32_t lisn, Error **errp)
{
    kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_SOURCE_SYNC, lisn,
                      NULL, true, errp);
}

/*
 * At reset, the interrupt sources are simply created and MASKED. We
 * only need to inform the KVM XIVE device about their type: LSI or
 * MSI.
 */
int kvmppc_xive_source_reset_one(XiveSource *xsrc, int srcno, Error **errp)
{
    SpaprXive *xive = SPAPR_XIVE(xsrc->xive);
    uint64_t state = 0;

    assert(xive->fd != -1);

    if (xive_source_irq_is_lsi(xsrc, srcno)) {
        state |= KVM_XIVE_LEVEL_SENSITIVE;
        if (xsrc->status[srcno] & XIVE_STATUS_ASSERTED) {
            state |= KVM_XIVE_LEVEL_ASSERTED;
        }
    }

    return kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_SOURCE, srcno, &state,
                             true, errp);
}

static int kvmppc_xive_source_reset(XiveSource *xsrc, Error **errp)
{
    SpaprXive *xive = SPAPR_XIVE(xsrc->xive);
    int i;

    for (i = 0; i < xsrc->nr_irqs; i++) {
        int ret;

        if (!xive_eas_is_valid(&xive->eat[i])) {
            continue;
        }

        ret = kvmppc_xive_source_reset_one(xsrc, i, errp);
        if (ret < 0) {
            return ret;
        }
    }

    return 0;
}

/*
 * This is used to perform the magic loads on the ESB pages, described
 * in xive.h.
 *
 * Memory barriers should not be needed for loads (no store for now).
 */
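/*
 * ESB MMIO load offsets, as defined in xive.h:
 *
 *   XIVE_ESB_LOAD_EOI  0x000 - EOI the source
 *   XIVE_ESB_GET       0x800 - return the PQ bits, no side effect
 *   XIVE_ESB_SET_PQ_00 0xc00 - set PQ to 00 (reset), return old PQ
 *   XIVE_ESB_SET_PQ_01 0xd00 - set PQ to 01 (off), return old PQ
 *   XIVE_ESB_SET_PQ_10 0xe00 - set PQ to 10 (pending), return old PQ
 *   XIVE_ESB_SET_PQ_11 0xf00 - set PQ to 11 (queued), return old PQ
 */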
static uint64_t xive_esb_rw(XiveSource *xsrc, int srcno, uint32_t offset,
                            uint64_t data, bool write)
{
    uint64_t *addr = xsrc->esb_mmap + xive_source_esb_mgmt(xsrc, srcno) +
        offset;

    if (write) {
        *addr = cpu_to_be64(data);
        return -1;
    } else {
        /* Prevent the compiler from optimizing away the load */
        volatile uint64_t value = be64_to_cpu(*addr);
        return value;
    }
}

static uint8_t xive_esb_read(XiveSource *xsrc, int srcno, uint32_t offset)
{
    /* Only the two PQ bits of the returned value are meaningful */
    return xive_esb_rw(xsrc, srcno, offset, 0, false) & 0x3;
}

static void xive_esb_trigger(XiveSource *xsrc, int srcno)
{
    /* A store to the ESB trigger page injects the interrupt in KVM */
    uint64_t *addr = xsrc->esb_mmap + xive_source_esb_page(xsrc, srcno);

    *addr = 0x0;
}

uint64_t kvmppc_xive_esb_rw(XiveSource *xsrc, int srcno, uint32_t offset,
                            uint64_t data, bool write)
{
    if (write) {
        return xive_esb_rw(xsrc, srcno, offset, data, true);
    }

    /*
     * Special Load EOI handling for LSI sources. Q bit is never set
     * and the interrupt should be re-triggered if the level is still
     * asserted.
     */
    if (xive_source_irq_is_lsi(xsrc, srcno) &&
        offset == XIVE_ESB_LOAD_EOI) {
        xive_esb_read(xsrc, srcno, XIVE_ESB_SET_PQ_00);
        if (xsrc->status[srcno] & XIVE_STATUS_ASSERTED) {
            xive_esb_trigger(xsrc, srcno);
        }
        return 0;
    } else {
        return xive_esb_rw(xsrc, srcno, offset, 0, false);
    }
}

static void kvmppc_xive_source_get_state(XiveSource *xsrc)
{
    SpaprXive *xive = SPAPR_XIVE(xsrc->xive);
    int i;

    for (i = 0; i < xsrc->nr_irqs; i++) {
        uint8_t pq;

        if (!xive_eas_is_valid(&xive->eat[i])) {
            continue;
        }

        /* Perform a load without side effect to retrieve the PQ bits */
        pq = xive_esb_read(xsrc, i, XIVE_ESB_GET);

        /* and save PQ locally */
        xive_source_esb_set(xsrc, i, pq);
    }
}

void kvmppc_xive_source_set_irq(void *opaque, int srcno, int val)
{
    XiveSource *xsrc = opaque;

    if (!xive_source_irq_is_lsi(xsrc, srcno)) {
        /* MSI: only the rising edge triggers the interrupt */
        if (!val) {
            return;
        }
    } else {
        /* LSI: track the asserted level for migration and EOI replay */
        if (val) {
            xsrc->status[srcno] |= XIVE_STATUS_ASSERTED;
        } else {
            xsrc->status[srcno] &= ~XIVE_STATUS_ASSERTED;
        }
    }

    xive_esb_trigger(xsrc, srcno);
}

/*
 * sPAPR XIVE interrupt controller (KVM)
 */
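
/*
 * KVM identifies an event queue (END) with a (server, priority)
 * tuple, while QEMU indexes the ENDT with an (END block, END index)
 * pair. spapr_xive_end_to_target() converts between the two.
 */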
int kvmppc_xive_get_queue_config(SpaprXive *xive, uint8_t end_blk,
                                 uint32_t end_idx, XiveEND *end,
                                 Error **errp)
{
    struct kvm_ppc_xive_eq kvm_eq = { 0 };
    uint64_t kvm_eq_idx;
    uint8_t priority;
    uint32_t server;
    int ret;

    assert(xive_end_is_valid(end));

    /* Encode the tuple (server, prio) as a KVM EQ index */
    spapr_xive_end_to_target(end_blk, end_idx, &server, &priority);

    kvm_eq_idx = priority << KVM_XIVE_EQ_PRIORITY_SHIFT &
        KVM_XIVE_EQ_PRIORITY_MASK;
    kvm_eq_idx |= server << KVM_XIVE_EQ_SERVER_SHIFT &
        KVM_XIVE_EQ_SERVER_MASK;

    ret = kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_EQ_CONFIG, kvm_eq_idx,
                            &kvm_eq, false, errp);
    if (ret < 0) {
        return ret;
    }

    /*
     * The EQ index and toggle bit are updated by HW. These are the
     * only fields from KVM we want to update QEMU with. The other END
     * fields should already be in the QEMU END table.
     */
    end->w1 = xive_set_field32(END_W1_GENERATION, 0ul, kvm_eq.qtoggle) |
        xive_set_field32(END_W1_PAGE_OFF, 0ul, kvm_eq.qindex);

    return 0;
}

int kvmppc_xive_set_queue_config(SpaprXive *xive, uint8_t end_blk,
                                 uint32_t end_idx, XiveEND *end,
                                 Error **errp)
{
    struct kvm_ppc_xive_eq kvm_eq = { 0 };
    uint64_t kvm_eq_idx;
    uint8_t priority;
    uint32_t server;

    /*
     * Build the KVM state from the local END structure.
     */

    kvm_eq.flags = 0;
    if (xive_get_field32(END_W0_UCOND_NOTIFY, end->w0)) {
        kvm_eq.flags |= KVM_XIVE_EQ_ALWAYS_NOTIFY;
    }

    /*
     * If the hcall is disabling the EQ, set the size and page address
     * to zero. When migrating, only valid ENDs are taken into
     * account.
     */
    if (xive_end_is_valid(end)) {
        /* END_W0_QSIZE encodes the EQ size as 2^(qsize + 12) bytes */
        kvm_eq.qshift = xive_get_field32(END_W0_QSIZE, end->w0) + 12;
        kvm_eq.qaddr  = xive_end_qaddr(end);
        /*
         * The EQ toggle bit and index should only be relevant when
         * restoring the EQ state
         */
        kvm_eq.qtoggle = xive_get_field32(END_W1_GENERATION, end->w1);
        kvm_eq.qindex  = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    } else {
        kvm_eq.qshift = 0;
        kvm_eq.qaddr  = 0;
    }

    /* Encode the tuple (server, prio) as a KVM EQ index */
    spapr_xive_end_to_target(end_blk, end_idx, &server, &priority);

    kvm_eq_idx = priority << KVM_XIVE_EQ_PRIORITY_SHIFT &
        KVM_XIVE_EQ_PRIORITY_MASK;
    kvm_eq_idx |= server << KVM_XIVE_EQ_SERVER_SHIFT &
        KVM_XIVE_EQ_SERVER_MASK;

    return kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_EQ_CONFIG, kvm_eq_idx,
                             &kvm_eq, true, errp);
}

void kvmppc_xive_reset(SpaprXive *xive, Error **errp)
{
    kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_CTRL, KVM_DEV_XIVE_RESET,
                      NULL, true, errp);
}

static void kvmppc_xive_get_queues(SpaprXive *xive, Error **errp)
{
    Error *local_err = NULL;
    int i;

    for (i = 0; i < xive->nr_ends; i++) {
        if (!xive_end_is_valid(&xive->endt[i])) {
            continue;
        }

        kvmppc_xive_get_queue_config(xive, SPAPR_XIVE_BLOCK_ID, i,
                                     &xive->endt[i], &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }
}

/*
 * The primary goal of the XIVE VM change handler is to mark the EQ
 * pages dirty when all XIVE event notifications have stopped.
 *
 * Whenever the VM is stopped, the VM change handler sets the source
 * PQs to PENDING to stop the flow of events and to possibly catch a
 * triggered interrupt occurring while the VM is stopped. The previous
 * state is saved in anticipation of a migration. The XIVE controller
 * is then synced through KVM to flush any in-flight event
 * notification and stabilize the EQs.
 *
 * At this stage, we can mark the EQ page dirty and let a migration
 * sequence transfer the EQ pages to the destination, which is done
 * just after the stop state.
 *
 * The previous configuration of the sources is restored when the VM
 * runs again. If an interrupt was queued while the VM was stopped,
 * simply generate a trigger.
 */
static void kvmppc_xive_change_state_handler(void *opaque, int running,
                                             RunState state)
{
    SpaprXive *xive = opaque;
    XiveSource *xsrc = &xive->source;
    Error *local_err = NULL;
    int i;

    /*
     * Restore the sources to their initial state. This is called when
     * the VM resumes after a stop or a migration.
     */
    if (running) {
        for (i = 0; i < xsrc->nr_irqs; i++) {
            uint8_t pq;
            uint8_t old_pq;

            if (!xive_eas_is_valid(&xive->eat[i])) {
                continue;
            }

            pq = xive_source_esb_get(xsrc, i);

            /*
             * The SET_PQ_xx load offsets are contiguous, so a single
             * load at XIVE_ESB_SET_PQ_00 + (pq << 8) restores the
             * saved PQ value while returning the PQ bits that KVM
             * accumulated in the meantime.
             */
            old_pq = xive_esb_read(xsrc, i, XIVE_ESB_SET_PQ_00 + (pq << 8));

            /*
             * An interrupt was queued while the VM was stopped,
             * generate a trigger.
             */
            if (pq == XIVE_ESB_RESET && old_pq == XIVE_ESB_QUEUED) {
                xive_esb_trigger(xsrc, i);
            }
        }

        return;
    }

    /*
     * Mask the sources, to stop the flow of event notifications, and
     * save the PQs locally in the XiveSource object. The XiveSource
     * state will be collected later on by its vmstate handler if a
     * migration is in progress.
     */
    for (i = 0; i < xsrc->nr_irqs; i++) {
        uint8_t pq;

        if (!xive_eas_is_valid(&xive->eat[i])) {
            continue;
        }

        pq = xive_esb_read(xsrc, i, XIVE_ESB_GET);

        /*
         * PQ is set to PENDING to possibly catch a triggered
         * interrupt occurring while the VM is stopped (hotplug event
         * for instance).
         */
        if (pq != XIVE_ESB_OFF) {
            pq = xive_esb_read(xsrc, i, XIVE_ESB_SET_PQ_10);
        }
        xive_source_esb_set(xsrc, i, pq);
    }

    /*
     * Sync the XIVE controller in KVM, to flush in-flight event
     * notification that should be enqueued in the EQs and mark the
     * XIVE EQ pages dirty to collect all updates.
     */
    kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_CTRL,
                      KVM_DEV_XIVE_EQ_SYNC, NULL, true, &local_err);
    if (local_err) {
        error_report_err(local_err);
        return;
    }
}

void kvmppc_xive_synchronize_state(SpaprXive *xive, Error **errp)
{
    assert(xive->fd != -1);

    /*
     * When the VM is stopped, the sources are masked and the previous
     * state is saved in anticipation of a migration. We should not
     * synchronize the source state in that case, otherwise we would
     * override the saved state.
     */
    if (runstate_is_running()) {
        kvmppc_xive_source_get_state(&xive->source);
    }

    /* EAT: there is no extra state to query from KVM */

    /* ENDT */
    kvmppc_xive_get_queues(xive, errp);
}

/*
 * The SpaprXive 'pre_save' method is called by the vmstate handler of
 * the SpaprXive model, after the XIVE controller is synced in the VM
 * change handler.
 */
int kvmppc_xive_pre_save(SpaprXive *xive)
{
    Error *local_err = NULL;

    assert(xive->fd != -1);

    /* EAT: there is no extra state to query from KVM */

    /* ENDT */
    kvmppc_xive_get_queues(xive, &local_err);
    if (local_err) {
        error_report_err(local_err);
        return -1;
    }

    return 0;
}

/*
 * The SpaprXive 'post_load' method is not called by a vmstate
 * handler. It is called at the sPAPR machine level at the end of the
 * migration sequence by the sPAPR IRQ backend 'post_load' method,
 * when all XIVE states have been transferred and loaded.
 */
int kvmppc_xive_post_load(SpaprXive *xive, int version_id)
{
    Error *local_err = NULL;
    CPUState *cs;
    int i;

    /* The KVM XIVE device should be in use */
    assert(xive->fd != -1);

    /* Restore the ENDT first. The targeting depends on it. */
    for (i = 0; i < xive->nr_ends; i++) {
        if (!xive_end_is_valid(&xive->endt[i])) {
            continue;
        }

        kvmppc_xive_set_queue_config(xive, SPAPR_XIVE_BLOCK_ID, i,
                                     &xive->endt[i], &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -1;
        }
    }

    /* Restore the EAT */
    for (i = 0; i < xive->nr_irqs; i++) {
        if (!xive_eas_is_valid(&xive->eat[i])) {
            continue;
        }

        /*
         * We can only restore the source config if the source has been
         * previously set in KVM. Since we don't do that for all interrupts
         * at reset time anymore, let's do it now.
         */
        kvmppc_xive_source_reset_one(&xive->source, i, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -1;
        }

        kvmppc_xive_set_source_config(xive, i, &xive->eat[i], &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -1;
        }
    }

    /*
     * Restore the thread interrupt contexts of initial CPUs.
     *
     * The context of hotplugged CPUs is restored later, by the
     * 'post_load' handler of the XiveTCTX model because they are not
     * available at the time the SpaprXive 'post_load' method is
     * called. We cannot restore the context of all CPUs in the
     * 'post_load' handler of XiveTCTX because the machine is not
     * necessarily connected to the KVM device at that time.
     */
    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        kvmppc_xive_cpu_set_state(spapr_cpu_state(cpu)->tctx, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -1;
        }
    }

    /* The source states will be restored when the machine starts running */
    return 0;
}

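/*
 * The KVM XIVE device exposes the ESB and TIMA pages through mmap()
 * of the device fd, at the fixed KVM_XIVE_ESB_PAGE_OFFSET and
 * KVM_XIVE_TIMA_PAGE_OFFSET page offsets used below.
 */
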
/* Returns MAP_FAILED on error and sets errno */
static void *kvmppc_xive_mmap(SpaprXive *xive, int pgoff, size_t len,
                              Error **errp)
{
    void *addr;
    uint32_t page_shift = 16; /* TODO: fix page_shift */

    addr = mmap(NULL, len, PROT_WRITE | PROT_READ, MAP_SHARED, xive->fd,
                pgoff << page_shift);
    if (addr == MAP_FAILED) {
        error_setg_errno(errp, errno, "XIVE: unable to set memory mapping");
    }

    return addr;
}

/*
 * All the XIVE memory regions are now backed by mappings from the KVM
 * XIVE device.
 */
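/*
 * The connection sequence: create the KVM device, advertise the
 * maximum number of vCPUs, mmap the ESB and TIMA pages, install the
 * VM change state handler, connect the vCPU presenters and finally
 * reset the sources.
 */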
int kvmppc_xive_connect(SpaprInterruptController *intc, uint32_t nr_servers,
                        Error **errp)
{
    SpaprXive *xive = SPAPR_XIVE(intc);
    XiveSource *xsrc = &xive->source;
    Error *local_err = NULL;
    size_t esb_len = xive_source_esb_len(xsrc);
    size_t tima_len = 4ull << TM_SHIFT;
    CPUState *cs;
    int fd;
    void *addr;

    /*
     * The KVM XIVE device is already in use. This is the case when
     * rebooting under the XIVE-only interrupt mode.
     */
    if (xive->fd != -1) {
        return 0;
    }

    if (!kvmppc_has_cap_xive()) {
        error_setg(errp, "IRQ_XIVE capability must be present for KVM");
        return -1;
    }

    /* First, create the KVM XIVE device */
    fd = kvm_create_device(kvm_state, KVM_DEV_TYPE_XIVE, false);
    if (fd < 0) {
        error_setg_errno(errp, -fd, "XIVE: error creating KVM device");
        return -1;
    }
    xive->fd = fd;

    /* Tell KVM about the number of vCPUs we may have */
    if (kvm_device_check_attr(xive->fd, KVM_DEV_XIVE_GRP_CTRL,
                              KVM_DEV_XIVE_NR_SERVERS)) {
        if (kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_CTRL,
                              KVM_DEV_XIVE_NR_SERVERS, &nr_servers, true,
                              &local_err)) {
            goto fail;
        }
    }

    /*
     * 1. Source ESB pages - KVM mapping
     */
    addr = kvmppc_xive_mmap(xive, KVM_XIVE_ESB_PAGE_OFFSET, esb_len,
                            &local_err);
    if (addr == MAP_FAILED) {
        goto fail;
    }
    xsrc->esb_mmap = addr;

    memory_region_init_ram_device_ptr(&xsrc->esb_mmio_kvm, OBJECT(xsrc),
                                      "xive.esb-kvm", esb_len, xsrc->esb_mmap);
    memory_region_add_subregion_overlap(&xsrc->esb_mmio, 0,
                                        &xsrc->esb_mmio_kvm, 1);

    /*
     * 2. END ESB pages (No KVM support yet)
     */

    /*
     * 3. TIMA pages - KVM mapping
     */
    addr = kvmppc_xive_mmap(xive, KVM_XIVE_TIMA_PAGE_OFFSET, tima_len,
                            &local_err);
    if (addr == MAP_FAILED) {
        goto fail;
    }
    xive->tm_mmap = addr;

    memory_region_init_ram_device_ptr(&xive->tm_mmio_kvm, OBJECT(xive),
                                      "xive.tima", tima_len, xive->tm_mmap);
    memory_region_add_subregion_overlap(&xive->tm_mmio, 0,
                                        &xive->tm_mmio_kvm, 1);

    xive->change = qemu_add_vm_change_state_handler(
        kvmppc_xive_change_state_handler, xive);

    /* Connect the presenters to the initial VCPUs of the machine */
    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        kvmppc_xive_cpu_connect(spapr_cpu_state(cpu)->tctx, &local_err);
        if (local_err) {
            goto fail;
        }
    }

    /* Update the KVM sources */
    kvmppc_xive_source_reset(xsrc, &local_err);
    if (local_err) {
        goto fail;
    }

    /*
     * Advertise the in-kernel irqchip: MSIs can now be routed through
     * irqfds and guest IRQ numbers (GSIs) map directly to LISNs.
     */
    kvm_kernel_irqchip = true;
    kvm_msi_via_irqfd_allowed = true;
    kvm_gsi_direct_mapping = true;
    return 0;

fail:
    error_propagate(errp, local_err);
    kvmppc_xive_disconnect(intc);
    return -1;
}

void kvmppc_xive_disconnect(SpaprInterruptController *intc)
{
    SpaprXive *xive = SPAPR_XIVE(intc);
    XiveSource *xsrc;
    size_t esb_len;

    assert(xive->fd != -1);

    /* Clear the KVM mapping */
    xsrc = &xive->source;
    esb_len = xive_source_esb_len(xsrc);

    if (xsrc->esb_mmap) {
        memory_region_del_subregion(&xsrc->esb_mmio, &xsrc->esb_mmio_kvm);
        object_unparent(OBJECT(&xsrc->esb_mmio_kvm));
        munmap(xsrc->esb_mmap, esb_len);
        xsrc->esb_mmap = NULL;
    }

    if (xive->tm_mmap) {
        memory_region_del_subregion(&xive->tm_mmio, &xive->tm_mmio_kvm);
        object_unparent(OBJECT(&xive->tm_mmio_kvm));
        munmap(xive->tm_mmap, 4ull << TM_SHIFT);
        xive->tm_mmap = NULL;
    }

    /*
     * When the KVM device fd is closed, the KVM device is destroyed
     * and removed from the list of devices of the VM. The VCPU
     * presenters are also detached from the device.
     */
    close(xive->fd);
    xive->fd = -1;

    kvm_kernel_irqchip = false;
    kvm_msi_via_irqfd_allowed = false;
    kvm_gsi_direct_mapping = false;

    /* Clear the local list of presenters (hotplug) */
    kvm_cpu_disable_all();

    /* VM Change state handler is not needed anymore */
    if (xive->change) {
        qemu_del_vm_change_state_handler(xive->change);
        xive->change = NULL;
    }
}