/*
 * QEMU PowerPC sPAPR XIVE interrupt controller model
 *
 * Copyright (c) 2017-2019, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/kvm.h"
#include "sysemu/runstate.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_cpu_core.h"
#include "hw/ppc/spapr_xive.h"
#include "hw/ppc/xive.h"
#include "kvm_ppc.h"

#include <sys/ioctl.h>

/*
 * Helpers for CPU hotplug
 *
 * TODO: make a common KVMEnabledCPU layer for XICS and XIVE
 */
typedef struct KVMEnabledCPU {
    unsigned long vcpu_id;
    QLIST_ENTRY(KVMEnabledCPU) node;
} KVMEnabledCPU;

static QLIST_HEAD(, KVMEnabledCPU)
    kvm_enabled_cpus = QLIST_HEAD_INITIALIZER(&kvm_enabled_cpus);

static bool kvm_cpu_is_enabled(CPUState *cs)
{
    KVMEnabledCPU *enabled_cpu;
    unsigned long vcpu_id = kvm_arch_vcpu_id(cs);

    QLIST_FOREACH(enabled_cpu, &kvm_enabled_cpus, node) {
        if (enabled_cpu->vcpu_id == vcpu_id) {
            return true;
        }
    }
    return false;
}

static void kvm_cpu_enable(CPUState *cs)
{
    KVMEnabledCPU *enabled_cpu;
    unsigned long vcpu_id = kvm_arch_vcpu_id(cs);

    enabled_cpu = g_malloc(sizeof(*enabled_cpu));
    enabled_cpu->vcpu_id = vcpu_id;
    QLIST_INSERT_HEAD(&kvm_enabled_cpus, enabled_cpu, node);
}

static void kvm_cpu_disable_all(void)
{
    KVMEnabledCPU *enabled_cpu, *next;

    QLIST_FOREACH_SAFE(enabled_cpu, &kvm_enabled_cpus, node, next) {
        QLIST_REMOVE(enabled_cpu, node);
        g_free(enabled_cpu);
    }
}

/*
 * XIVE Thread Interrupt Management context (KVM)
 */

int kvmppc_xive_cpu_set_state(XiveTCTX *tctx, Error **errp)
{
    SpaprXive *xive = SPAPR_XIVE(tctx->xptr);
    uint64_t state[2];
    int ret;

    assert(xive->fd != -1);

    /* word0 and word1 of the OS ring. */
    state[0] = *((uint64_t *) &tctx->regs[TM_QW1_OS]);

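    /*
     * KVM_REG_PPC_VP_STATE is a pair of 64-bit words but only the
     * first one, set above from the OS ring, is meaningful to QEMU.
     */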
    ret = kvm_set_one_reg(tctx->cs, KVM_REG_PPC_VP_STATE, state);
    if (ret != 0) {
        error_setg_errno(errp, -ret,
                         "XIVE: could not restore KVM state of CPU %ld",
                         kvm_arch_vcpu_id(tctx->cs));
        return ret;
    }

    return 0;
}

int kvmppc_xive_cpu_get_state(XiveTCTX *tctx, Error **errp)
{
    SpaprXive *xive = SPAPR_XIVE(tctx->xptr);
    uint64_t state[2] = { 0 };
    int ret;

    assert(xive->fd != -1);

    ret = kvm_get_one_reg(tctx->cs, KVM_REG_PPC_VP_STATE, state);
    if (ret != 0) {
        error_setg_errno(errp, -ret,
                         "XIVE: could not capture KVM state of CPU %ld",
                         kvm_arch_vcpu_id(tctx->cs));
        return ret;
    }

    /* word0 and word1 of the OS ring. */
    *((uint64_t *) &tctx->regs[TM_QW1_OS]) = state[0];

    return 0;
}

typedef struct {
    XiveTCTX *tctx;
    Error *err;
} XiveCpuGetState;

static void kvmppc_xive_cpu_do_synchronize_state(CPUState *cpu,
                                                 run_on_cpu_data arg)
{
    XiveCpuGetState *s = arg.host_ptr;

    kvmppc_xive_cpu_get_state(s->tctx, &s->err);
}

void kvmppc_xive_cpu_synchronize_state(XiveTCTX *tctx, Error **errp)
{
    XiveCpuGetState s = {
        .tctx = tctx,
        .err = NULL,
    };

    /*
     * Kick the vCPU to make sure it is available for the KVM ioctl.
     */
    run_on_cpu(tctx->cs, kvmppc_xive_cpu_do_synchronize_state,
               RUN_ON_CPU_HOST_PTR(&s));

    if (s.err) {
        error_propagate(errp, s.err);
        return;
    }
}

int kvmppc_xive_cpu_connect(XiveTCTX *tctx, Error **errp)
{
    ERRP_GUARD();
    SpaprXive *xive = SPAPR_XIVE(tctx->xptr);
    unsigned long vcpu_id;
    int ret;

    assert(xive->fd != -1);

    /* Check if CPU was hot unplugged and replugged. */
    if (kvm_cpu_is_enabled(tctx->cs)) {
        return 0;
    }

    vcpu_id = kvm_arch_vcpu_id(tctx->cs);

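    /*
     * KVM_CAP_PPC_IRQ_XIVE attaches the vCPU to the KVM XIVE device;
     * the vCPU id also serves as the XIVE server number of the
     * presenter.
     */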
    ret = kvm_vcpu_enable_cap(tctx->cs, KVM_CAP_PPC_IRQ_XIVE, 0, xive->fd,
                              vcpu_id, 0);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "XIVE: unable to connect CPU%ld to KVM device",
                         vcpu_id);
        if (ret == -ENOSPC) {
            error_append_hint(errp, "Try -smp maxcpus=N with N < %u\n",
                              MACHINE(qdev_get_machine())->smp.max_cpus);
        }
        return ret;
    }

    kvm_cpu_enable(tctx->cs);
    return 0;
}

/*
 * XIVE Interrupt Source (KVM)
 */

void kvmppc_xive_set_source_config(SpaprXive *xive, uint32_t lisn, XiveEAS *eas,
                                   Error **errp)
{
    uint32_t end_idx;
    uint32_t end_blk;
    uint8_t priority;
    uint32_t server;
    bool masked;
    uint32_t eisn;
    uint64_t kvm_src;
    Error *local_err = NULL;

    assert(xive_eas_is_valid(eas));

    end_idx = xive_get_field64(EAS_END_INDEX, eas->w);
    end_blk = xive_get_field64(EAS_END_BLOCK, eas->w);
    eisn = xive_get_field64(EAS_END_DATA, eas->w);
    masked = xive_eas_is_masked(eas);

    spapr_xive_end_to_target(end_blk, end_idx, &server, &priority);

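    /*
     * Pack the source targeting information in the 64-bit word
     * expected by the KVM_DEV_XIVE_GRP_SOURCE_CONFIG attribute:
     * priority in the low bits, then the server number, the masked
     * bit and finally the EISN, i.e. the event data pushed in the EQ.
     */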
    kvm_src = priority << KVM_XIVE_SOURCE_PRIORITY_SHIFT &
        KVM_XIVE_SOURCE_PRIORITY_MASK;
    kvm_src |= server << KVM_XIVE_SOURCE_SERVER_SHIFT &
        KVM_XIVE_SOURCE_SERVER_MASK;
    kvm_src |= ((uint64_t) masked << KVM_XIVE_SOURCE_MASKED_SHIFT) &
        KVM_XIVE_SOURCE_MASKED_MASK;
    kvm_src |= ((uint64_t)eisn << KVM_XIVE_SOURCE_EISN_SHIFT) &
        KVM_XIVE_SOURCE_EISN_MASK;

    kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_SOURCE_CONFIG, lisn,
                      &kvm_src, true, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
}

void kvmppc_xive_sync_source(SpaprXive *xive, uint32_t lisn, Error **errp)
{
    kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_SOURCE_SYNC, lisn,
                      NULL, true, errp);
}

/*
 * At reset, the interrupt sources are simply created and MASKED. We
 * only need to inform the KVM XIVE device about their type: LSI or
 * MSI.
 */
int kvmppc_xive_source_reset_one(XiveSource *xsrc, int srcno, Error **errp)
{
    SpaprXive *xive = SPAPR_XIVE(xsrc->xive);
    uint64_t state = 0;

    assert(xive->fd != -1);

    if (xive_source_irq_is_lsi(xsrc, srcno)) {
        state |= KVM_XIVE_LEVEL_SENSITIVE;
        if (xsrc->status[srcno] & XIVE_STATUS_ASSERTED) {
            state |= KVM_XIVE_LEVEL_ASSERTED;
        }
    }

    return kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_SOURCE, srcno, &state,
                             true, errp);
}

static int kvmppc_xive_source_reset(XiveSource *xsrc, Error **errp)
{
    SpaprXive *xive = SPAPR_XIVE(xsrc->xive);
    int i;

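    /*
     * Sources are created MASKED at reset. Only the ones backing a
     * valid EAS, i.e. in use by the machine, need to be declared to
     * KVM.
     */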
    for (i = 0; i < xsrc->nr_irqs; i++) {
        int ret;

        if (!xive_eas_is_valid(&xive->eat[i])) {
            continue;
        }

        ret = kvmppc_xive_source_reset_one(xsrc, i, errp);
        if (ret < 0) {
            return ret;
        }
    }

    return 0;
}

/*
 * This is used to perform the magic loads on the ESB pages, described
 * in xive.h.
 *
 * Memory barriers should not be needed for loads (no store for now).
 */
static uint64_t xive_esb_rw(XiveSource *xsrc, int srcno, uint32_t offset,
                            uint64_t data, bool write)
{
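    /*
     * Compute the address of the ESB management page of the source,
     * which provides the 'magic' loads and stores described in xive.h.
     */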
    uint64_t *addr = xsrc->esb_mmap + xive_source_esb_mgmt(xsrc, srcno) +
        offset;

    if (write) {
        *addr = cpu_to_be64(data);
        return -1;
    } else {
        /* Prevent the compiler from optimizing away the load */
        volatile uint64_t value = be64_to_cpu(*addr);
        return value;
    }
}

static uint8_t xive_esb_read(XiveSource *xsrc, int srcno, uint32_t offset)
{
    return xive_esb_rw(xsrc, srcno, offset, 0, false) & 0x3;
}

static void xive_esb_trigger(XiveSource *xsrc, int srcno)
{
    uint64_t *addr = xsrc->esb_mmap + xive_source_esb_page(xsrc, srcno);

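    /* A store of any value on the trigger page fires the interrupt */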
    *addr = 0x0;
}

uint64_t kvmppc_xive_esb_rw(XiveSource *xsrc, int srcno, uint32_t offset,
                            uint64_t data, bool write)
{
    if (write) {
        return xive_esb_rw(xsrc, srcno, offset, data, true);
    }

    /*
     * Special Load EOI handling for LSI sources. Q bit is never set
     * and the interrupt should be re-triggered if the level is still
     * asserted.
     */
    if (xive_source_irq_is_lsi(xsrc, srcno) &&
        offset == XIVE_ESB_LOAD_EOI) {
        xive_esb_read(xsrc, srcno, XIVE_ESB_SET_PQ_00);
        if (xsrc->status[srcno] & XIVE_STATUS_ASSERTED) {
            xive_esb_trigger(xsrc, srcno);
        }
        return 0;
    } else {
        return xive_esb_rw(xsrc, srcno, offset, 0, false);
    }
}

static void kvmppc_xive_source_get_state(XiveSource *xsrc)
{
    SpaprXive *xive = SPAPR_XIVE(xsrc->xive);
    int i;

    for (i = 0; i < xsrc->nr_irqs; i++) {
        uint8_t pq;

        if (!xive_eas_is_valid(&xive->eat[i])) {
            continue;
        }

        /* Perform a load without side effect to retrieve the PQ bits */
        pq = xive_esb_read(xsrc, i, XIVE_ESB_GET);

        /* and save PQ locally */
        xive_source_esb_set(xsrc, i, pq);
    }
}

void kvmppc_xive_source_set_irq(void *opaque, int srcno, int val)
{
    XiveSource *xsrc = opaque;

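    /*
     * MSIs are edge-triggered: a deassertion is ignored. LSIs track
     * the level in the source status so that a Load EOI can re-trigger
     * the interrupt if it is still asserted.
     */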
    if (!xive_source_irq_is_lsi(xsrc, srcno)) {
        if (!val) {
            return;
        }
    } else {
        if (val) {
            xsrc->status[srcno] |= XIVE_STATUS_ASSERTED;
        } else {
            xsrc->status[srcno] &= ~XIVE_STATUS_ASSERTED;
        }
    }

    xive_esb_trigger(xsrc, srcno);
}

/*
 * sPAPR XIVE interrupt controller (KVM)
 */
void kvmppc_xive_get_queue_config(SpaprXive *xive, uint8_t end_blk,
                                  uint32_t end_idx, XiveEND *end,
                                  Error **errp)
{
    struct kvm_ppc_xive_eq kvm_eq = { 0 };
    uint64_t kvm_eq_idx;
    uint8_t priority;
    uint32_t server;
    Error *local_err = NULL;

    assert(xive_end_is_valid(end));

    /* Encode the tuple (server, prio) as a KVM EQ index */
    spapr_xive_end_to_target(end_blk, end_idx, &server, &priority);

    kvm_eq_idx = priority << KVM_XIVE_EQ_PRIORITY_SHIFT &
            KVM_XIVE_EQ_PRIORITY_MASK;
    kvm_eq_idx |= server << KVM_XIVE_EQ_SERVER_SHIFT &
        KVM_XIVE_EQ_SERVER_MASK;

    kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_EQ_CONFIG, kvm_eq_idx,
                      &kvm_eq, false, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /*
     * The EQ index and toggle bit are updated by HW. These are the
     * only fields from KVM we want to update QEMU with. The other END
     * fields should already be in the QEMU END table.
     */
    end->w1 = xive_set_field32(END_W1_GENERATION, 0ul, kvm_eq.qtoggle) |
        xive_set_field32(END_W1_PAGE_OFF, 0ul, kvm_eq.qindex);
}

void kvmppc_xive_set_queue_config(SpaprXive *xive, uint8_t end_blk,
                                  uint32_t end_idx, XiveEND *end,
                                  Error **errp)
{
    struct kvm_ppc_xive_eq kvm_eq = { 0 };
    uint64_t kvm_eq_idx;
    uint8_t priority;
    uint32_t server;
    Error *local_err = NULL;

    /*
     * Build the KVM state from the local END structure.
     */

    kvm_eq.flags = 0;
    if (xive_get_field32(END_W0_UCOND_NOTIFY, end->w0)) {
        kvm_eq.flags |= KVM_XIVE_EQ_ALWAYS_NOTIFY;
    }

    /*
     * If the hcall is disabling the EQ, set the size and page address
     * to zero. When migrating, only valid ENDs are taken into
     * account.
     */
    if (xive_end_is_valid(end)) {
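        /* END_W0_QSIZE encodes the EQ size as 2^(qsize + 12) bytes */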
        kvm_eq.qshift = xive_get_field32(END_W0_QSIZE, end->w0) + 12;
        kvm_eq.qaddr  = xive_end_qaddr(end);
        /*
         * The EQ toggle bit and index should only be relevant when
         * restoring the EQ state
         */
        kvm_eq.qtoggle = xive_get_field32(END_W1_GENERATION, end->w1);
        kvm_eq.qindex  = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    } else {
        kvm_eq.qshift = 0;
        kvm_eq.qaddr  = 0;
    }

    /* Encode the tuple (server, prio) as a KVM EQ index */
    spapr_xive_end_to_target(end_blk, end_idx, &server, &priority);

    kvm_eq_idx = priority << KVM_XIVE_EQ_PRIORITY_SHIFT &
            KVM_XIVE_EQ_PRIORITY_MASK;
    kvm_eq_idx |= server << KVM_XIVE_EQ_SERVER_SHIFT &
        KVM_XIVE_EQ_SERVER_MASK;

    kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_EQ_CONFIG, kvm_eq_idx,
                      &kvm_eq, true, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
}

void kvmppc_xive_reset(SpaprXive *xive, Error **errp)
{
    kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_CTRL, KVM_DEV_XIVE_RESET,
                      NULL, true, errp);
}

static void kvmppc_xive_get_queues(SpaprXive *xive, Error **errp)
{
    Error *local_err = NULL;
    int i;

    for (i = 0; i < xive->nr_ends; i++) {
        if (!xive_end_is_valid(&xive->endt[i])) {
            continue;
        }

        kvmppc_xive_get_queue_config(xive, SPAPR_XIVE_BLOCK_ID, i,
                                     &xive->endt[i], &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }
}

/*
 * The primary goal of the XIVE VM change handler is to mark the EQ
 * pages dirty when all XIVE event notifications have stopped.
 *
 * Whenever the VM is stopped, the VM change handler sets the source
 * PQs to PENDING to stop the flow of events and to possibly catch a
 * triggered interrupt occurring while the VM is stopped. The previous
 * state is saved in anticipation of a migration. The XIVE controller
 * is then synced through KVM to flush any in-flight event
 * notification and stabilize the EQs.
 *
 * At this stage, we can mark the EQ page dirty and let a migration
 * sequence transfer the EQ pages to the destination, which is done
 * just after the stop state.
 *
 * The previous configuration of the sources is restored when the VM
 * runs again. If an interrupt was queued while the VM was stopped,
 * simply generate a trigger.
 */
static void kvmppc_xive_change_state_handler(void *opaque, int running,
                                             RunState state)
{
    SpaprXive *xive = opaque;
    XiveSource *xsrc = &xive->source;
    Error *local_err = NULL;
    int i;

    /*
     * Restore the sources to their initial state. This is called when
     * the VM resumes after a stop or a migration.
     */
    if (running) {
        for (i = 0; i < xsrc->nr_irqs; i++) {
            uint8_t pq;
            uint8_t old_pq;

            if (!xive_eas_is_valid(&xive->eat[i])) {
                continue;
            }

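            /*
             * The 'set PQ' ESB offsets are contiguous, 0x100 apart,
             * hence SET_PQ_00 + (pq << 8) restores the PQ bits saved
             * when the VM was stopped, and returns the previous state.
             */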
            pq = xive_source_esb_get(xsrc, i);
            old_pq = xive_esb_read(xsrc, i, XIVE_ESB_SET_PQ_00 + (pq << 8));

            /*
             * An interrupt was queued while the VM was stopped,
             * generate a trigger.
             */
            if (pq == XIVE_ESB_RESET && old_pq == XIVE_ESB_QUEUED) {
                xive_esb_trigger(xsrc, i);
            }
        }

        return;
    }

    /*
     * Mask the sources, to stop the flow of event notifications, and
     * save the PQs locally in the XiveSource object. The XiveSource
     * state will be collected later on by its vmstate handler if a
     * migration is in progress.
     */
    for (i = 0; i < xsrc->nr_irqs; i++) {
        uint8_t pq;

        if (!xive_eas_is_valid(&xive->eat[i])) {
            continue;
        }

        pq = xive_esb_read(xsrc, i, XIVE_ESB_GET);

        /*
         * PQ is set to PENDING to possibly catch a triggered
         * interrupt occurring while the VM is stopped (hotplug event
         * for instance).
         */
        if (pq != XIVE_ESB_OFF) {
            pq = xive_esb_read(xsrc, i, XIVE_ESB_SET_PQ_10);
        }
        xive_source_esb_set(xsrc, i, pq);
    }

    /*
     * Sync the XIVE controller in KVM, to flush in-flight event
     * notification that should be enqueued in the EQs and mark the
     * XIVE EQ pages dirty to collect all updates.
     */
    kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_CTRL,
                      KVM_DEV_XIVE_EQ_SYNC, NULL, true, &local_err);
    if (local_err) {
        error_report_err(local_err);
        return;
    }
}

void kvmppc_xive_synchronize_state(SpaprXive *xive, Error **errp)
{
    assert(xive->fd != -1);

    /*
     * When the VM is stopped, the sources are masked and the previous
     * state is saved in anticipation of a migration. We should not
     * synchronize the source state in that case, otherwise we would
     * overwrite the saved state.
     */
    if (runstate_is_running()) {
        kvmppc_xive_source_get_state(&xive->source);
    }

    /* EAT: there is no extra state to query from KVM */

    /* ENDT */
    kvmppc_xive_get_queues(xive, errp);
}

/*
 * The SpaprXive 'pre_save' method is called by the vmstate handler of
 * the SpaprXive model, after the XIVE controller is synced in the VM
 * change handler.
 */
int kvmppc_xive_pre_save(SpaprXive *xive)
{
    Error *local_err = NULL;

    assert(xive->fd != -1);

    /* EAT: there is no extra state to query from KVM */

    /* ENDT */
    kvmppc_xive_get_queues(xive, &local_err);
    if (local_err) {
        error_report_err(local_err);
        return -1;
    }

    return 0;
}

/*
 * The SpaprXive 'post_load' method is not called by a vmstate
 * handler. It is called at the sPAPR machine level at the end of the
 * migration sequence by the sPAPR IRQ backend 'post_load' method,
 * when all XIVE states have been transferred and loaded.
 */
int kvmppc_xive_post_load(SpaprXive *xive, int version_id)
{
    Error *local_err = NULL;
    CPUState *cs;
    int i;

    /* The KVM XIVE device should be in use */
    assert(xive->fd != -1);

    /* Restore the ENDT first. The targeting depends on it. */
    for (i = 0; i < xive->nr_ends; i++) {
        if (!xive_end_is_valid(&xive->endt[i])) {
            continue;
        }

        kvmppc_xive_set_queue_config(xive, SPAPR_XIVE_BLOCK_ID, i,
                                     &xive->endt[i], &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -1;
        }
    }

    /* Restore the EAT */
    for (i = 0; i < xive->nr_irqs; i++) {
        if (!xive_eas_is_valid(&xive->eat[i])) {
            continue;
        }

        /*
         * We can only restore the source config if the source has been
         * previously set in KVM. Since we don't do that for all interrupts
         * at reset time anymore, let's do it now.
         */
        kvmppc_xive_source_reset_one(&xive->source, i, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -1;
        }

        kvmppc_xive_set_source_config(xive, i, &xive->eat[i], &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -1;
        }
    }

    /*
     * Restore the thread interrupt contexts of initial CPUs.
     *
     * The context of hotplugged CPUs is restored later, by the
     * 'post_load' handler of the XiveTCTX model because they are not
     * available at the time the SpaprXive 'post_load' method is
     * called. We can not restore the context of all CPUs in the
     * 'post_load' handler of XiveTCTX because the machine is not
     * necessarily connected to the KVM device at that time.
     */
    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        kvmppc_xive_cpu_set_state(spapr_cpu_state(cpu)->tctx, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -1;
        }
    }

    /* The source states will be restored when the machine starts running */
    return 0;
}

/* Returns MAP_FAILED on error and sets errno */
static void *kvmppc_xive_mmap(SpaprXive *xive, int pgoff, size_t len,
                              Error **errp)
{
    void *addr;
    uint32_t page_shift = 16; /* TODO: fix page_shift */

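    /* Offsets into the KVM device fd are expressed in 64K pages */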
    addr = mmap(NULL, len, PROT_WRITE | PROT_READ, MAP_SHARED, xive->fd,
                pgoff << page_shift);
    if (addr == MAP_FAILED) {
        error_setg_errno(errp, errno, "XIVE: unable to set memory mapping");
    }

    return addr;
}

/*
 * All the XIVE memory regions are now backed by mappings from the KVM
 * XIVE device.
 */
int kvmppc_xive_connect(SpaprInterruptController *intc, uint32_t nr_servers,
                        Error **errp)
{
    SpaprXive *xive = SPAPR_XIVE(intc);
    XiveSource *xsrc = &xive->source;
    Error *local_err = NULL;
    size_t esb_len = xive_source_esb_len(xsrc);
    size_t tima_len = 4ull << TM_SHIFT;
    CPUState *cs;
    int fd;
    void *addr;

    /*
     * The KVM XIVE device is already in use. This is the case when
     * rebooting under the XIVE-only interrupt mode.
     */
    if (xive->fd != -1) {
        return 0;
    }

    if (!kvmppc_has_cap_xive()) {
        error_setg(errp, "IRQ_XIVE capability must be present for KVM");
        return -1;
    }

    /* First, create the KVM XIVE device */
    fd = kvm_create_device(kvm_state, KVM_DEV_TYPE_XIVE, false);
    if (fd < 0) {
        error_setg_errno(errp, -fd, "XIVE: error creating KVM device");
        return -1;
    }
    xive->fd = fd;

    /* Tell KVM about the # of VCPUs we may have */
    if (kvm_device_check_attr(xive->fd, KVM_DEV_XIVE_GRP_CTRL,
                              KVM_DEV_XIVE_NR_SERVERS)) {
        if (kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_CTRL,
                              KVM_DEV_XIVE_NR_SERVERS, &nr_servers, true,
                              &local_err)) {
            goto fail;
        }
    }

    /*
     * 1. Source ESB pages - KVM mapping
     */
    addr = kvmppc_xive_mmap(xive, KVM_XIVE_ESB_PAGE_OFFSET, esb_len,
                            &local_err);
    if (addr == MAP_FAILED) {
        goto fail;
    }
    xsrc->esb_mmap = addr;

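    /*
     * Overlay the KVM ESB mapping on the emulated ESB region with a
     * higher priority, so that guest accesses are serviced by the
     * KVM pages directly.
     */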
    memory_region_init_ram_device_ptr(&xsrc->esb_mmio_kvm, OBJECT(xsrc),
                                      "xive.esb-kvm", esb_len, xsrc->esb_mmap);
    memory_region_add_subregion_overlap(&xsrc->esb_mmio, 0,
                                        &xsrc->esb_mmio_kvm, 1);

    /*
     * 2. END ESB pages (No KVM support yet)
     */

    /*
     * 3. TIMA pages - KVM mapping
     */
    addr = kvmppc_xive_mmap(xive, KVM_XIVE_TIMA_PAGE_OFFSET, tima_len,
                            &local_err);
    if (addr == MAP_FAILED) {
        goto fail;
    }
    xive->tm_mmap = addr;

    memory_region_init_ram_device_ptr(&xive->tm_mmio_kvm, OBJECT(xive),
                                      "xive.tima", tima_len, xive->tm_mmap);
    memory_region_add_subregion_overlap(&xive->tm_mmio, 0,
                                        &xive->tm_mmio_kvm, 1);

    xive->change = qemu_add_vm_change_state_handler(
        kvmppc_xive_change_state_handler, xive);

    /* Connect the presenters to the initial VCPUs of the machine */
    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        kvmppc_xive_cpu_connect(spapr_cpu_state(cpu)->tctx, &local_err);
        if (local_err) {
            goto fail;
        }
    }

    /* Update the KVM sources */
    kvmppc_xive_source_reset(xsrc, &local_err);
    if (local_err) {
        goto fail;
    }

    kvm_kernel_irqchip = true;
    kvm_msi_via_irqfd_allowed = true;
    kvm_gsi_direct_mapping = true;
    return 0;

fail:
    error_propagate(errp, local_err);
    kvmppc_xive_disconnect(intc);
    return -1;
}

void kvmppc_xive_disconnect(SpaprInterruptController *intc)
{
    SpaprXive *xive = SPAPR_XIVE(intc);
    XiveSource *xsrc;
    size_t esb_len;

    assert(xive->fd != -1);

    /* Clear the KVM mapping */
    xsrc = &xive->source;
    esb_len = xive_source_esb_len(xsrc);

    if (xsrc->esb_mmap) {
        memory_region_del_subregion(&xsrc->esb_mmio, &xsrc->esb_mmio_kvm);
        object_unparent(OBJECT(&xsrc->esb_mmio_kvm));
        munmap(xsrc->esb_mmap, esb_len);
        xsrc->esb_mmap = NULL;
    }

    if (xive->tm_mmap) {
        memory_region_del_subregion(&xive->tm_mmio, &xive->tm_mmio_kvm);
        object_unparent(OBJECT(&xive->tm_mmio_kvm));
        munmap(xive->tm_mmap, 4ull << TM_SHIFT);
        xive->tm_mmap = NULL;
    }

    /*
     * When the KVM device fd is closed, the KVM device is destroyed
     * and removed from the list of devices of the VM. The VCPU
     * presenters are also detached from the device.
     */
    close(xive->fd);
    xive->fd = -1;

    kvm_kernel_irqchip = false;
    kvm_msi_via_irqfd_allowed = false;
    kvm_gsi_direct_mapping = false;

    /* Clear the local list of presenters (hotplug) */
    kvm_cpu_disable_all();

    /* VM Change state handler is not needed anymore */
    if (xive->change) {
        qemu_del_vm_change_state_handler(xive->change);
        xive->change = NULL;
    }
}