xref: /openbmc/qemu/hw/ppc/spapr.c (revision 0a553c58)
1 /*
2  * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
3  *
4  * Copyright (c) 2004-2007 Fabrice Bellard
5  * Copyright (c) 2007 Jocelyn Mayer
6  * Copyright (c) 2010 David Gibson, IBM Corporation.
7  *
8  * Permission is hereby granted, free of charge, to any person obtaining a copy
9  * of this software and associated documentation files (the "Software"), to deal
10  * in the Software without restriction, including without limitation the rights
11  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12  * copies of the Software, and to permit persons to whom the Software is
13  * furnished to do so, subject to the following conditions:
14  *
15  * The above copyright notice and this permission notice shall be included in
16  * all copies or substantial portions of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
24  * THE SOFTWARE.
25  *
26  */
27 #include "qemu/osdep.h"
28 #include "qapi/error.h"
29 #include "qapi/visitor.h"
30 #include "sysemu/sysemu.h"
31 #include "sysemu/numa.h"
32 #include "sysemu/qtest.h"
33 #include "hw/hw.h"
34 #include "qemu/log.h"
35 #include "hw/fw-path-provider.h"
36 #include "elf.h"
37 #include "net/net.h"
38 #include "sysemu/device_tree.h"
39 #include "sysemu/cpus.h"
40 #include "sysemu/hw_accel.h"
41 #include "kvm_ppc.h"
42 #include "migration/misc.h"
43 #include "migration/global_state.h"
44 #include "migration/register.h"
45 #include "mmu-hash64.h"
46 #include "mmu-book3s-v3.h"
47 #include "cpu-models.h"
48 #include "qom/cpu.h"
49 
50 #include "hw/boards.h"
51 #include "hw/ppc/ppc.h"
52 #include "hw/loader.h"
53 
54 #include "hw/ppc/fdt.h"
55 #include "hw/ppc/spapr.h"
56 #include "hw/ppc/spapr_vio.h"
57 #include "hw/pci-host/spapr.h"
58 #include "hw/pci/msi.h"
59 
60 #include "hw/pci/pci.h"
61 #include "hw/scsi/scsi.h"
62 #include "hw/virtio/virtio-scsi.h"
63 #include "hw/virtio/vhost-scsi-common.h"
64 
65 #include "exec/address-spaces.h"
66 #include "exec/ram_addr.h"
67 #include "hw/usb.h"
68 #include "qemu/config-file.h"
69 #include "qemu/error-report.h"
70 #include "trace.h"
71 #include "hw/nmi.h"
72 #include "hw/intc/intc.h"
73 
74 #include "qemu/cutils.h"
75 #include "hw/ppc/spapr_cpu_core.h"
76 #include "hw/mem/memory-device.h"
77 
78 #include <libfdt.h>
79 
80 /* SLOF memory layout:
81  *
82  * The SLOF raw image is loaded at 0; it copies its romfs right below the
83  * flat device-tree, then positions SLOF itself 31M below that.
84  *
85  * So we set FW_OVERHEAD to 40MB, which should account for all of that
86  * and more.
87  *
88  * We load our kernel at 4M, leaving space for the SLOF initial image.
89  */
90 #define FDT_MAX_SIZE            0x100000
91 #define RTAS_MAX_SIZE           0x10000
92 #define RTAS_MAX_ADDR           0x80000000 /* RTAS must stay below that */
93 #define FW_MAX_SIZE             0x400000
94 #define FW_FILE_NAME            "slof.bin"
95 #define FW_OVERHEAD             0x2800000
96 #define KERNEL_LOAD_ADDR        FW_MAX_SIZE
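
/*
 * Worked example of the constants above (a sketch, assuming the defaults
 * are unchanged):
 *   FW_MAX_SIZE  = 0x400000  (4M)  -- SLOF image window; the kernel is
 *                                     loaded right after it, at
 *                                     KERNEL_LOAD_ADDR
 *   FW_OVERHEAD  = 0x2800000 (40M) -- covers SLOF, its romfs and the
 *                                     flat device-tree
 *   FDT_MAX_SIZE = 0x100000  (1M)
 */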
97 
98 #define MIN_RMA_SLOF            128UL
99 
100 #define PHANDLE_INTC            0x00001111
101 
102 /* These two functions implement the VCPU id numbering: one computes a
103  * VCPU id and one identifies thread 0 of a VCORE. Any change to the first one
104  * is likely to have an impact on the second one, so let's keep them close.
105  */
106 static int spapr_vcpu_id(SpaprMachineState *spapr, int cpu_index)
107 {
108     assert(spapr->vsmt);
109     return
110         (cpu_index / smp_threads) * spapr->vsmt + cpu_index % smp_threads;
111 }
112 static bool spapr_is_thread0_in_vcore(SpaprMachineState *spapr,
113                                       PowerPCCPU *cpu)
114 {
115     assert(spapr->vsmt);
116     return spapr_get_vcpu_id(cpu) % spapr->vsmt == 0;
117 }
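
/*
 * Example of the mapping implemented by the two helpers above: with
 * smp_threads == 4 and spapr->vsmt == 8, cpu_index 0..3 map to VCPU ids
 * 0..3 and cpu_index 4..7 map to VCPU ids 8..11; only ids 0 and 8
 * (multiples of vsmt) are thread 0 of their VCORE.
 */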
118 
119 static bool pre_2_10_vmstate_dummy_icp_needed(void *opaque)
120 {
121     /* Dummy entries correspond to unused ICPState objects in older QEMUs,
122      * and newer QEMUs don't even have them. In both cases, we don't want
123      * to send anything on the wire.
124      */
125     return false;
126 }
127 
128 static const VMStateDescription pre_2_10_vmstate_dummy_icp = {
129     .name = "icp/server",
130     .version_id = 1,
131     .minimum_version_id = 1,
132     .needed = pre_2_10_vmstate_dummy_icp_needed,
133     .fields = (VMStateField[]) {
134         VMSTATE_UNUSED(4), /* uint32_t xirr */
135         VMSTATE_UNUSED(1), /* uint8_t pending_priority */
136         VMSTATE_UNUSED(1), /* uint8_t mfrr */
137         VMSTATE_END_OF_LIST()
138     },
139 };
140 
141 static void pre_2_10_vmstate_register_dummy_icp(int i)
142 {
143     vmstate_register(NULL, i, &pre_2_10_vmstate_dummy_icp,
144                      (void *)(uintptr_t) i);
145 }
146 
147 static void pre_2_10_vmstate_unregister_dummy_icp(int i)
148 {
149     vmstate_unregister(NULL, &pre_2_10_vmstate_dummy_icp,
150                        (void *)(uintptr_t) i);
151 }
152 
153 int spapr_max_server_number(SpaprMachineState *spapr)
154 {
155     assert(spapr->vsmt);
156     return DIV_ROUND_UP(max_cpus * spapr->vsmt, smp_threads);
157 }
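
/*
 * E.g. with max_cpus == 8, smp_threads == 4 and vsmt == 8 this returns
 * DIV_ROUND_UP(8 * 8, 4) == 16: the VCPU id space reserves vsmt ids per
 * core even when only smp_threads of them are in use.
 */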
158 
159 static int spapr_fixup_cpu_smt_dt(void *fdt, int offset, PowerPCCPU *cpu,
160                                   int smt_threads)
161 {
162     int i, ret = 0;
163     uint32_t servers_prop[smt_threads];
164     uint32_t gservers_prop[smt_threads * 2];
165     int index = spapr_get_vcpu_id(cpu);
166 
167     if (cpu->compat_pvr) {
168         ret = fdt_setprop_cell(fdt, offset, "cpu-version", cpu->compat_pvr);
169         if (ret < 0) {
170             return ret;
171         }
172     }
173 
174     /* Build interrupt servers and gservers properties */
175     for (i = 0; i < smt_threads; i++) {
176         servers_prop[i] = cpu_to_be32(index + i);
177         /* Hack, direct the group queues back to cpu 0 */
178         gservers_prop[i*2] = cpu_to_be32(index + i);
179         gservers_prop[i*2 + 1] = 0;
180     }
181     ret = fdt_setprop(fdt, offset, "ibm,ppc-interrupt-server#s",
182                       servers_prop, sizeof(servers_prop));
183     if (ret < 0) {
184         return ret;
185     }
186     ret = fdt_setprop(fdt, offset, "ibm,ppc-interrupt-gserver#s",
187                       gservers_prop, sizeof(gservers_prop));
188 
189     return ret;
190 }
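
/*
 * For example, for the CPU with VCPU id 8 and smt_threads == 4, the loop
 * above produces:
 *   ibm,ppc-interrupt-server#s  = <8 9 10 11>
 *   ibm,ppc-interrupt-gserver#s = <8 0 9 0 10 0 11 0>
 */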
191 
192 static int spapr_fixup_cpu_numa_dt(void *fdt, int offset, PowerPCCPU *cpu)
193 {
194     int index = spapr_get_vcpu_id(cpu);
195     uint32_t associativity[] = {cpu_to_be32(0x5),
196                                 cpu_to_be32(0x0),
197                                 cpu_to_be32(0x0),
198                                 cpu_to_be32(0x0),
199                                 cpu_to_be32(cpu->node_id),
200                                 cpu_to_be32(index)};
201 
202     /* Advertise NUMA via ibm,associativity */
203     return fdt_setprop(fdt, offset, "ibm,associativity", associativity,
204                           sizeof(associativity));
205 }
206 
207 /* Populate the "ibm,pa-features" property */
208 static void spapr_populate_pa_features(SpaprMachineState *spapr,
209                                        PowerPCCPU *cpu,
210                                        void *fdt, int offset,
211                                        bool legacy_guest)
212 {
213     uint8_t pa_features_206[] = { 6, 0,
214         0xf6, 0x1f, 0xc7, 0x00, 0x80, 0xc0 };
215     uint8_t pa_features_207[] = { 24, 0,
216         0xf6, 0x1f, 0xc7, 0xc0, 0x80, 0xf0,
217         0x80, 0x00, 0x00, 0x00, 0x00, 0x00,
218         0x00, 0x00, 0x00, 0x00, 0x80, 0x00,
219         0x80, 0x00, 0x80, 0x00, 0x00, 0x00 };
220     uint8_t pa_features_300[] = { 66, 0,
221         /* 0: MMU|FPU|SLB|RUN|DABR|NX, 1: fri[nzpm]|DABRX|SPRG3|SLB0|PP110 */
222         /* 2: VPM|DS205|PPR|DS202|DS206, 3: LSD|URG, SSO, 5: LE|CFAR|EB|LSQ */
223         0xf6, 0x1f, 0xc7, 0xc0, 0x80, 0xf0, /* 0 - 5 */
224         /* 6: DS207 */
225         0x80, 0x00, 0x00, 0x00, 0x00, 0x00, /* 6 - 11 */
226         /* 16: Vector */
227         0x00, 0x00, 0x00, 0x00, 0x80, 0x00, /* 12 - 17 */
228         /* 18: Vec. Scalar, 20: Vec. XOR, 22: HTM */
229         0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 18 - 23 */
230         /* 24: Ext. Dec, 26: 64 bit ftrs, 28: PM ftrs */
231         0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 24 - 29 */
232         /* 30: MMR, 32: LE atomic, 34: EBB + ext EBB */
233         0x80, 0x00, 0x80, 0x00, 0xC0, 0x00, /* 30 - 35 */
234         /* 36: SPR SO, 38: Copy/Paste, 40: Radix MMU */
235         0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 36 - 41 */
236         /* 42: PM, 44: PC RA, 46: SC vec'd */
237         0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 42 - 47 */
238         /* 48: SIMD, 50: QP BFP, 52: String */
239         0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 48 - 53 */
240         /* 54: DecFP, 56: DecI, 58: SHA */
241         0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 54 - 59 */
242         /* 60: NM atomic, 62: RNG */
243         0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 60 - 65 */
244     };
245     uint8_t *pa_features = NULL;
246     size_t pa_size;
247 
248     if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_2_06, 0, cpu->compat_pvr)) {
249         pa_features = pa_features_206;
250         pa_size = sizeof(pa_features_206);
251     }
252     if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_2_07, 0, cpu->compat_pvr)) {
253         pa_features = pa_features_207;
254         pa_size = sizeof(pa_features_207);
255     }
256     if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_00, 0, cpu->compat_pvr)) {
257         pa_features = pa_features_300;
258         pa_size = sizeof(pa_features_300);
259     }
260     if (!pa_features) {
261         return;
262     }
263 
264     if (ppc_hash64_has(cpu, PPC_HASH64_CI_LARGEPAGE)) {
265         /*
266          * Note: we keep CI large pages off by default because a 64K capable
267          * guest provisioned with large pages might otherwise try to map a qemu
268          * framebuffer (or other kind of memory mapped PCI BAR) using 64K pages
269          * even if that qemu runs on a 4k host.
270          * We add this bit back here if we are confident this is not an issue.
271          */
272         pa_features[3] |= 0x20;
273     }
274     if ((spapr_get_cap(spapr, SPAPR_CAP_HTM) != 0) && pa_size > 24) {
275         pa_features[24] |= 0x80;    /* Transactional memory support */
276     }
277     if (legacy_guest && pa_size > 40) {
278         /* Workaround for broken kernels that attempt (guest) radix
279          * mode when they can't handle it, if they see the radix bit set
280          * in pa-features. So hide it from them. */
281         pa_features[40 + 2] &= ~0x80; /* Radix MMU */
282     }
283 
284     _FDT((fdt_setprop(fdt, offset, "ibm,pa-features", pa_features, pa_size)));
285 }
286 
287 static int spapr_fixup_cpu_dt(void *fdt, SpaprMachineState *spapr)
288 {
289     int ret = 0, offset, cpus_offset;
290     CPUState *cs;
291     char cpu_model[32];
292     uint32_t pft_size_prop[] = {0, cpu_to_be32(spapr->htab_shift)};
293 
294     CPU_FOREACH(cs) {
295         PowerPCCPU *cpu = POWERPC_CPU(cs);
296         DeviceClass *dc = DEVICE_GET_CLASS(cs);
297         int index = spapr_get_vcpu_id(cpu);
298         int compat_smt = MIN(smp_threads, ppc_compat_max_vthreads(cpu));
299 
300         if (!spapr_is_thread0_in_vcore(spapr, cpu)) {
301             continue;
302         }
303 
304         snprintf(cpu_model, 32, "%s@%x", dc->fw_name, index);
305 
306         cpus_offset = fdt_path_offset(fdt, "/cpus");
307         if (cpus_offset < 0) {
308             cpus_offset = fdt_add_subnode(fdt, 0, "cpus");
309             if (cpus_offset < 0) {
310                 return cpus_offset;
311             }
312         }
313         offset = fdt_subnode_offset(fdt, cpus_offset, cpu_model);
314         if (offset < 0) {
315             offset = fdt_add_subnode(fdt, cpus_offset, cpu_model);
316             if (offset < 0) {
317                 return offset;
318             }
319         }
320 
321         ret = fdt_setprop(fdt, offset, "ibm,pft-size",
322                           pft_size_prop, sizeof(pft_size_prop));
323         if (ret < 0) {
324             return ret;
325         }
326 
327         if (nb_numa_nodes > 1) {
328             ret = spapr_fixup_cpu_numa_dt(fdt, offset, cpu);
329             if (ret < 0) {
330                 return ret;
331             }
332         }
333 
334         ret = spapr_fixup_cpu_smt_dt(fdt, offset, cpu, compat_smt);
335         if (ret < 0) {
336             return ret;
337         }
338 
339         spapr_populate_pa_features(spapr, cpu, fdt, offset,
340                                    spapr->cas_legacy_guest_workaround);
341     }
342     return ret;
343 }
344 
345 static hwaddr spapr_node0_size(MachineState *machine)
346 {
347     if (nb_numa_nodes) {
348         int i;
349         for (i = 0; i < nb_numa_nodes; ++i) {
350             if (numa_info[i].node_mem) {
351                 return MIN(pow2floor(numa_info[i].node_mem),
352                            machine->ram_size);
353             }
354         }
355     }
356     return machine->ram_size;
357 }
358 
359 static void add_str(GString *s, const gchar *s1)
360 {
361     g_string_append_len(s, s1, strlen(s1) + 1);
362 }
363 
364 static int spapr_populate_memory_node(void *fdt, int nodeid, hwaddr start,
365                                        hwaddr size)
366 {
367     uint32_t associativity[] = {
368         cpu_to_be32(0x4), /* length */
369         cpu_to_be32(0x0), cpu_to_be32(0x0),
370         cpu_to_be32(0x0), cpu_to_be32(nodeid)
371     };
372     char mem_name[32];
373     uint64_t mem_reg_property[2];
374     int off;
375 
376     mem_reg_property[0] = cpu_to_be64(start);
377     mem_reg_property[1] = cpu_to_be64(size);
378 
379     sprintf(mem_name, "memory@" TARGET_FMT_lx, start);
380     off = fdt_add_subnode(fdt, 0, mem_name);
381     _FDT(off);
382     _FDT((fdt_setprop_string(fdt, off, "device_type", "memory")));
383     _FDT((fdt_setprop(fdt, off, "reg", mem_reg_property,
384                       sizeof(mem_reg_property))));
385     _FDT((fdt_setprop(fdt, off, "ibm,associativity", associativity,
386                       sizeof(associativity))));
387     return off;
388 }
389 
390 static int spapr_populate_memory(SpaprMachineState *spapr, void *fdt)
391 {
392     MachineState *machine = MACHINE(spapr);
393     hwaddr mem_start, node_size;
394     int i, nb_nodes = nb_numa_nodes;
395     NodeInfo *nodes = numa_info;
396     NodeInfo ramnode;
397 
398     /* No NUMA nodes, assume there is just one node with the whole RAM */
399     if (!nb_numa_nodes) {
400         nb_nodes = 1;
401         ramnode.node_mem = machine->ram_size;
402         nodes = &ramnode;
403     }
404 
405     for (i = 0, mem_start = 0; i < nb_nodes; ++i) {
406         if (!nodes[i].node_mem) {
407             continue;
408         }
409         if (mem_start >= machine->ram_size) {
410             node_size = 0;
411         } else {
412             node_size = nodes[i].node_mem;
413             if (node_size > machine->ram_size - mem_start) {
414                 node_size = machine->ram_size - mem_start;
415             }
416         }
417         if (!mem_start) {
418             /* spapr_machine_init() checks for rma_size <= node0_size
419              * already */
420             spapr_populate_memory_node(fdt, i, 0, spapr->rma_size);
421             mem_start += spapr->rma_size;
422             node_size -= spapr->rma_size;
423         }
424         for ( ; node_size; ) {
425             hwaddr sizetmp = pow2floor(node_size);
426 
427             /* mem_start != 0 here */
428             if (ctzl(mem_start) < ctzl(sizetmp)) {
429                 sizetmp = 1ULL << ctzl(mem_start);
430             }
431 
432             spapr_populate_memory_node(fdt, i, mem_start, sizetmp);
433             node_size -= sizetmp;
434             mem_start += sizetmp;
435         }
436     }
437 
438     return 0;
439 }
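
/*
 * Example of the splitting logic above: a 3G node starting at 1G is
 * emitted as memory@40000000 of size 1G (limited by the alignment of
 * mem_start) followed by memory@80000000 of size 2G (the pow2floor of
 * the remainder).
 */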
440 
441 static void spapr_populate_cpu_dt(CPUState *cs, void *fdt, int offset,
442                                   SpaprMachineState *spapr)
443 {
444     PowerPCCPU *cpu = POWERPC_CPU(cs);
445     CPUPPCState *env = &cpu->env;
446     PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs);
447     int index = spapr_get_vcpu_id(cpu);
448     uint32_t segs[] = {cpu_to_be32(28), cpu_to_be32(40),
449                        0xffffffff, 0xffffffff};
450     uint32_t tbfreq = kvm_enabled() ? kvmppc_get_tbfreq()
451         : SPAPR_TIMEBASE_FREQ;
452     uint32_t cpufreq = kvm_enabled() ? kvmppc_get_clockfreq() : 1000000000;
453     uint32_t page_sizes_prop[64];
454     size_t page_sizes_prop_size;
455     uint32_t vcpus_per_socket = smp_threads * smp_cores;
456     uint32_t pft_size_prop[] = {0, cpu_to_be32(spapr->htab_shift)};
457     int compat_smt = MIN(smp_threads, ppc_compat_max_vthreads(cpu));
458     SpaprDrc *drc;
459     int drc_index;
460     uint32_t radix_AP_encodings[PPC_PAGE_SIZES_MAX_SZ];
461     int i;
462 
463     drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU, index);
464     if (drc) {
465         drc_index = spapr_drc_index(drc);
466         _FDT((fdt_setprop_cell(fdt, offset, "ibm,my-drc-index", drc_index)));
467     }
468 
469     _FDT((fdt_setprop_cell(fdt, offset, "reg", index)));
470     _FDT((fdt_setprop_string(fdt, offset, "device_type", "cpu")));
471 
472     _FDT((fdt_setprop_cell(fdt, offset, "cpu-version", env->spr[SPR_PVR])));
473     _FDT((fdt_setprop_cell(fdt, offset, "d-cache-block-size",
474                            env->dcache_line_size)));
475     _FDT((fdt_setprop_cell(fdt, offset, "d-cache-line-size",
476                            env->dcache_line_size)));
477     _FDT((fdt_setprop_cell(fdt, offset, "i-cache-block-size",
478                            env->icache_line_size)));
479     _FDT((fdt_setprop_cell(fdt, offset, "i-cache-line-size",
480                            env->icache_line_size)));
481 
482     if (pcc->l1_dcache_size) {
483         _FDT((fdt_setprop_cell(fdt, offset, "d-cache-size",
484                                pcc->l1_dcache_size)));
485     } else {
486         warn_report("Unknown L1 dcache size for cpu");
487     }
488     if (pcc->l1_icache_size) {
489         _FDT((fdt_setprop_cell(fdt, offset, "i-cache-size",
490                                pcc->l1_icache_size)));
491     } else {
492         warn_report("Unknown L1 icache size for cpu");
493     }
494 
495     _FDT((fdt_setprop_cell(fdt, offset, "timebase-frequency", tbfreq)));
496     _FDT((fdt_setprop_cell(fdt, offset, "clock-frequency", cpufreq)));
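    /* Advertise the SLB size under both the current ("slb-size") and the
     * legacy ("ibm,slb-size") property name, for older guests */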
497     _FDT((fdt_setprop_cell(fdt, offset, "slb-size", cpu->hash64_opts->slb_size)));
498     _FDT((fdt_setprop_cell(fdt, offset, "ibm,slb-size", cpu->hash64_opts->slb_size)));
499     _FDT((fdt_setprop_string(fdt, offset, "status", "okay")));
500     _FDT((fdt_setprop(fdt, offset, "64-bit", NULL, 0)));
501 
502     if (env->spr_cb[SPR_PURR].oea_read) {
503         _FDT((fdt_setprop(fdt, offset, "ibm,purr", NULL, 0)));
504     }
505 
506     if (ppc_hash64_has(cpu, PPC_HASH64_1TSEG)) {
507         _FDT((fdt_setprop(fdt, offset, "ibm,processor-segment-sizes",
508                           segs, sizeof(segs))));
509     }
510 
511     /* Advertise VSX (vector extensions) if available
512      *   1               == VMX / Altivec available
513      *   2               == VSX available
514      *
515      * Only CPUs for which we create core types in spapr_cpu_core.c
516      * are possible, and all of those have VMX */
517     if (spapr_get_cap(spapr, SPAPR_CAP_VSX) != 0) {
518         _FDT((fdt_setprop_cell(fdt, offset, "ibm,vmx", 2)));
519     } else {
520         _FDT((fdt_setprop_cell(fdt, offset, "ibm,vmx", 1)));
521     }
522 
523     /* Advertise DFP (Decimal Floating Point) if available
524      *   0 / no property == no DFP
525      *   1               == DFP available */
526     if (spapr_get_cap(spapr, SPAPR_CAP_DFP) != 0) {
527         _FDT((fdt_setprop_cell(fdt, offset, "ibm,dfp", 1)));
528     }
529 
530     page_sizes_prop_size = ppc_create_page_sizes_prop(cpu, page_sizes_prop,
531                                                       sizeof(page_sizes_prop));
532     if (page_sizes_prop_size) {
533         _FDT((fdt_setprop(fdt, offset, "ibm,segment-page-sizes",
534                           page_sizes_prop, page_sizes_prop_size)));
535     }
536 
537     spapr_populate_pa_features(spapr, cpu, fdt, offset, false);
538 
539     _FDT((fdt_setprop_cell(fdt, offset, "ibm,chip-id",
540                            cs->cpu_index / vcpus_per_socket)));
541 
542     _FDT((fdt_setprop(fdt, offset, "ibm,pft-size",
543                       pft_size_prop, sizeof(pft_size_prop))));
544 
545     if (nb_numa_nodes > 1) {
546         _FDT(spapr_fixup_cpu_numa_dt(fdt, offset, cpu));
547     }
548 
549     _FDT(spapr_fixup_cpu_smt_dt(fdt, offset, cpu, compat_smt));
550 
551     if (pcc->radix_page_info) {
552         for (i = 0; i < pcc->radix_page_info->count; i++) {
553             radix_AP_encodings[i] =
554                 cpu_to_be32(pcc->radix_page_info->entries[i]);
555         }
556         _FDT((fdt_setprop(fdt, offset, "ibm,processor-radix-AP-encodings",
557                           radix_AP_encodings,
558                           pcc->radix_page_info->count *
559                           sizeof(radix_AP_encodings[0]))));
560     }
561 
562     /*
563      * We set this property to let the guest know that it can use the large
564      * decrementer, and to advertise its width in bits.
565      */
566     if (spapr_get_cap(spapr, SPAPR_CAP_LARGE_DECREMENTER) != SPAPR_CAP_OFF) {
567         _FDT((fdt_setprop_u32(fdt, offset, "ibm,dec-bits",
568                               pcc->lrg_decr_bits)));
    }
569 }
570 
571 static void spapr_populate_cpus_dt_node(void *fdt, SpaprMachineState *spapr)
572 {
573     CPUState **rev;
574     CPUState *cs;
575     int n_cpus;
576     int cpus_offset;
577     char *nodename;
578     int i;
579 
580     cpus_offset = fdt_add_subnode(fdt, 0, "cpus");
581     _FDT(cpus_offset);
582     _FDT((fdt_setprop_cell(fdt, cpus_offset, "#address-cells", 0x1)));
583     _FDT((fdt_setprop_cell(fdt, cpus_offset, "#size-cells", 0x0)));
584 
585     /*
586      * We walk the CPUs in reverse order to ensure that CPU DT nodes
587      * created by fdt_add_subnode() end up in the right order in the FDT,
588      * so that the guest kernel enumerates the CPUs correctly.
589      *
590      * The CPU list cannot be traversed in reverse order, so we need
591      * to do extra work.
592      */
593     n_cpus = 0;
594     rev = NULL;
595     CPU_FOREACH(cs) {
596         rev = g_renew(CPUState *, rev, n_cpus + 1);
597         rev[n_cpus++] = cs;
598     }
599 
600     for (i = n_cpus - 1; i >= 0; i--) {
601         CPUState *cs = rev[i];
602         PowerPCCPU *cpu = POWERPC_CPU(cs);
603         int index = spapr_get_vcpu_id(cpu);
604         DeviceClass *dc = DEVICE_GET_CLASS(cs);
605         int offset;
606 
607         if (!spapr_is_thread0_in_vcore(spapr, cpu)) {
608             continue;
609         }
610 
611         nodename = g_strdup_printf("%s@%x", dc->fw_name, index);
612         offset = fdt_add_subnode(fdt, cpus_offset, nodename);
613         g_free(nodename);
614         _FDT(offset);
615         spapr_populate_cpu_dt(cs, fdt, offset, spapr);
616     }
617 
618     g_free(rev);
619 }
620 
621 static int spapr_rng_populate_dt(void *fdt)
622 {
623     int node;
624     int ret;
625 
626     node = qemu_fdt_add_subnode(fdt, "/ibm,platform-facilities");
627     if (node <= 0) {
628         return -1;
629     }
630     ret = fdt_setprop_string(fdt, node, "device_type",
631                              "ibm,platform-facilities");
632     ret |= fdt_setprop_cell(fdt, node, "#address-cells", 0x1);
633     ret |= fdt_setprop_cell(fdt, node, "#size-cells", 0x0);
634 
635     node = fdt_add_subnode(fdt, node, "ibm,random-v1");
636     if (node <= 0) {
637         return -1;
638     }
639     ret |= fdt_setprop_string(fdt, node, "compatible", "ibm,random");
640 
641     return ret ? -1 : 0;
642 }
643 
644 static uint32_t spapr_pc_dimm_node(MemoryDeviceInfoList *list, ram_addr_t addr)
645 {
646     MemoryDeviceInfoList *info;
647 
648     for (info = list; info; info = info->next) {
649         MemoryDeviceInfo *value = info->value;
650 
651         if (value && value->type == MEMORY_DEVICE_INFO_KIND_DIMM) {
652             PCDIMMDeviceInfo *pcdimm_info = value->u.dimm.data;
653 
654             if (addr >= pcdimm_info->addr &&
655                 addr < (pcdimm_info->addr + pcdimm_info->size)) {
656                 return pcdimm_info->node;
657             }
658         }
659     }
660 
661     return -1;
662 }
663 
664 struct sPAPRDrconfCellV2 {
665      uint32_t seq_lmbs;
666      uint64_t base_addr;
667      uint32_t drc_index;
668      uint32_t aa_index;
669      uint32_t flags;
670 } QEMU_PACKED;
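
/*
 * Each cell describes a run of seq_lmbs contiguous LMBs sharing the same
 * flags and associativity index. E.g. with the usual 256M LMB size, the
 * first 4G (boot RAM plus the gap below device memory) becomes a single
 * cell:
 *   { seq_lmbs = 16, base_addr = 0, drc_index = 0, aa_index = -1,
 *     flags = RESERVED | DRC_INVALID }
 * (see spapr_populate_drmem_v2() below).
 */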
671 
672 typedef struct DrconfCellQueue {
673     struct sPAPRDrconfCellV2 cell;
674     QSIMPLEQ_ENTRY(DrconfCellQueue) entry;
675 } DrconfCellQueue;
676 
677 static DrconfCellQueue *
678 spapr_get_drconf_cell(uint32_t seq_lmbs, uint64_t base_addr,
679                       uint32_t drc_index, uint32_t aa_index,
680                       uint32_t flags)
681 {
682     DrconfCellQueue *elem;
683 
684     elem = g_malloc0(sizeof(*elem));
685     elem->cell.seq_lmbs = cpu_to_be32(seq_lmbs);
686     elem->cell.base_addr = cpu_to_be64(base_addr);
687     elem->cell.drc_index = cpu_to_be32(drc_index);
688     elem->cell.aa_index = cpu_to_be32(aa_index);
689     elem->cell.flags = cpu_to_be32(flags);
690 
691     return elem;
692 }
693 
694 /* ibm,dynamic-memory-v2 */
695 static int spapr_populate_drmem_v2(SpaprMachineState *spapr, void *fdt,
696                                    int offset, MemoryDeviceInfoList *dimms)
697 {
698     MachineState *machine = MACHINE(spapr);
699     uint8_t *int_buf, *cur_index;
700     int ret;
701     uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE;
702     uint64_t addr, cur_addr, size;
703     uint32_t nr_boot_lmbs = (machine->device_memory->base / lmb_size);
704     uint64_t mem_end = machine->device_memory->base +
705                        memory_region_size(&machine->device_memory->mr);
706     uint32_t node, buf_len, nr_entries = 0;
707     SpaprDrc *drc;
708     DrconfCellQueue *elem, *next;
709     MemoryDeviceInfoList *info;
710     QSIMPLEQ_HEAD(, DrconfCellQueue) drconf_queue
711         = QSIMPLEQ_HEAD_INITIALIZER(drconf_queue);
712 
713     /* Entry to cover RAM and the gap area */
714     elem = spapr_get_drconf_cell(nr_boot_lmbs, 0, 0, -1,
715                                  SPAPR_LMB_FLAGS_RESERVED |
716                                  SPAPR_LMB_FLAGS_DRC_INVALID);
717     QSIMPLEQ_INSERT_TAIL(&drconf_queue, elem, entry);
718     nr_entries++;
719 
720     cur_addr = machine->device_memory->base;
721     for (info = dimms; info; info = info->next) {
722         PCDIMMDeviceInfo *di = info->value->u.dimm.data;
723 
724         addr = di->addr;
725         size = di->size;
726         node = di->node;
727 
728         /* Entry for hot-pluggable area */
729         if (cur_addr < addr) {
730             drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, cur_addr / lmb_size);
731             g_assert(drc);
732             elem = spapr_get_drconf_cell((addr - cur_addr) / lmb_size,
733                                          cur_addr, spapr_drc_index(drc), -1, 0);
734             QSIMPLEQ_INSERT_TAIL(&drconf_queue, elem, entry);
735             nr_entries++;
736         }
737 
738         /* Entry for DIMM */
739         drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, addr / lmb_size);
740         g_assert(drc);
741         elem = spapr_get_drconf_cell(size / lmb_size, addr,
742                                      spapr_drc_index(drc), node,
743                                      SPAPR_LMB_FLAGS_ASSIGNED);
744         QSIMPLEQ_INSERT_TAIL(&drconf_queue, elem, entry);
745         nr_entries++;
746         cur_addr = addr + size;
747     }
748 
749     /* Entry for remaining hotpluggable area */
750     if (cur_addr < mem_end) {
751         drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, cur_addr / lmb_size);
752         g_assert(drc);
753         elem = spapr_get_drconf_cell((mem_end - cur_addr) / lmb_size,
754                                      cur_addr, spapr_drc_index(drc), -1, 0);
755         QSIMPLEQ_INSERT_TAIL(&drconf_queue, elem, entry);
756         nr_entries++;
757     }
758 
759     buf_len = nr_entries * sizeof(struct sPAPRDrconfCellV2) + sizeof(uint32_t);
760     int_buf = cur_index = g_malloc0(buf_len);
761     *(uint32_t *)int_buf = cpu_to_be32(nr_entries);
762     cur_index += sizeof(nr_entries);
763 
764     QSIMPLEQ_FOREACH_SAFE(elem, &drconf_queue, entry, next) {
765         memcpy(cur_index, &elem->cell, sizeof(elem->cell));
766         cur_index += sizeof(elem->cell);
767         QSIMPLEQ_REMOVE(&drconf_queue, elem, DrconfCellQueue, entry);
768         g_free(elem);
769     }
770 
771     ret = fdt_setprop(fdt, offset, "ibm,dynamic-memory-v2", int_buf, buf_len);
772     g_free(int_buf);
773     if (ret < 0) {
774         return -1;
775     }
776     return 0;
777 }
778 
779 /* ibm,dynamic-memory */
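
/*
 * Each of the nr_lmbs entries written below is SPAPR_DR_LMB_LIST_ENTRY_SIZE
 * (i.e. six) cells wide: the 64-bit base address (two cells), the DRC
 * index, a reserved cell, the associativity (NUMA node) index and the
 * flags.
 */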
780 static int spapr_populate_drmem_v1(SpaprMachineState *spapr, void *fdt,
781                                    int offset, MemoryDeviceInfoList *dimms)
782 {
783     MachineState *machine = MACHINE(spapr);
784     int i, ret;
785     uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE;
786     uint32_t device_lmb_start = machine->device_memory->base / lmb_size;
787     uint32_t nr_lmbs = (machine->device_memory->base +
788                        memory_region_size(&machine->device_memory->mr)) /
789                        lmb_size;
790     uint32_t *int_buf, *cur_index, buf_len;
791 
792     /*
793      * Allocate a buffer large enough to hold the ibm,dynamic-memory property
794      */
795     buf_len = (nr_lmbs * SPAPR_DR_LMB_LIST_ENTRY_SIZE + 1) * sizeof(uint32_t);
796     cur_index = int_buf = g_malloc0(buf_len);
797     int_buf[0] = cpu_to_be32(nr_lmbs);
798     cur_index++;
799     for (i = 0; i < nr_lmbs; i++) {
800         uint64_t addr = i * lmb_size;
801         uint32_t *dynamic_memory = cur_index;
802 
803         if (i >= device_lmb_start) {
804             SpaprDrc *drc;
805 
806             drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, i);
807             g_assert(drc);
808 
809             dynamic_memory[0] = cpu_to_be32(addr >> 32);
810             dynamic_memory[1] = cpu_to_be32(addr & 0xffffffff);
811             dynamic_memory[2] = cpu_to_be32(spapr_drc_index(drc));
812             dynamic_memory[3] = cpu_to_be32(0); /* reserved */
813             dynamic_memory[4] = cpu_to_be32(spapr_pc_dimm_node(dimms, addr));
814             if (memory_region_present(get_system_memory(), addr)) {
815                 dynamic_memory[5] = cpu_to_be32(SPAPR_LMB_FLAGS_ASSIGNED);
816             } else {
817                 dynamic_memory[5] = cpu_to_be32(0);
818             }
819         } else {
820             /*
821              * LMB information for the RMA, boot-time RAM and the gap between
822              * RAM and the device memory region -- all these are marked as
823              * reserved and as having no valid DRC.
824              */
825             dynamic_memory[0] = cpu_to_be32(addr >> 32);
826             dynamic_memory[1] = cpu_to_be32(addr & 0xffffffff);
827             dynamic_memory[2] = cpu_to_be32(0);
828             dynamic_memory[3] = cpu_to_be32(0); /* reserved */
829             dynamic_memory[4] = cpu_to_be32(-1);
830             dynamic_memory[5] = cpu_to_be32(SPAPR_LMB_FLAGS_RESERVED |
831                                             SPAPR_LMB_FLAGS_DRC_INVALID);
832         }
833 
834         cur_index += SPAPR_DR_LMB_LIST_ENTRY_SIZE;
835     }
836     ret = fdt_setprop(fdt, offset, "ibm,dynamic-memory", int_buf, buf_len);
837     g_free(int_buf);
838     if (ret < 0) {
839         return -1;
840     }
841     return 0;
842 }
843 
844 /*
845  * Adds the ibm,dynamic-reconfiguration-memory node.
846  * Refer to docs/specs/ppc-spapr-hotplug.txt for the documentation
847  * of this device tree node.
848  */
849 static int spapr_populate_drconf_memory(SpaprMachineState *spapr, void *fdt)
850 {
851     MachineState *machine = MACHINE(spapr);
852     int ret, i, offset;
853     uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE;
854     uint32_t prop_lmb_size[] = {0, cpu_to_be32(lmb_size)};
855     uint32_t *int_buf, *cur_index, buf_len;
856     int nr_nodes = nb_numa_nodes ? nb_numa_nodes : 1;
857     MemoryDeviceInfoList *dimms = NULL;
858 
859     /*
860      * Don't create the node if there is no device memory
861      */
862     if (machine->ram_size == machine->maxram_size) {
863         return 0;
864     }
865 
866     offset = fdt_add_subnode(fdt, 0, "ibm,dynamic-reconfiguration-memory");
867 
868     ret = fdt_setprop(fdt, offset, "ibm,lmb-size", prop_lmb_size,
869                     sizeof(prop_lmb_size));
870     if (ret < 0) {
871         return ret;
872     }
873 
874     ret = fdt_setprop_cell(fdt, offset, "ibm,memory-flags-mask", 0xff);
875     if (ret < 0) {
876         return ret;
877     }
878 
879     ret = fdt_setprop_cell(fdt, offset, "ibm,memory-preservation-time", 0x0);
880     if (ret < 0) {
881         return ret;
882     }
883 
884     /* ibm,dynamic-memory or ibm,dynamic-memory-v2 */
885     dimms = qmp_memory_device_list();
886     if (spapr_ovec_test(spapr->ov5_cas, OV5_DRMEM_V2)) {
887         ret = spapr_populate_drmem_v2(spapr, fdt, offset, dimms);
888     } else {
889         ret = spapr_populate_drmem_v1(spapr, fdt, offset, dimms);
890     }
891     qapi_free_MemoryDeviceInfoList(dimms);
892 
893     if (ret < 0) {
894         return ret;
895     }
896 
897     /* ibm,associativity-lookup-arrays */
898     buf_len = (nr_nodes * 4 + 2) * sizeof(uint32_t);
899     cur_index = int_buf = g_malloc0(buf_len);
900     int_buf[0] = cpu_to_be32(nr_nodes);
901     int_buf[1] = cpu_to_be32(4); /* Number of entries per associativity list */
902     cur_index += 2;
903     for (i = 0; i < nr_nodes; i++) {
904         uint32_t associativity[] = {
905             cpu_to_be32(0x0),
906             cpu_to_be32(0x0),
907             cpu_to_be32(0x0),
908             cpu_to_be32(i)
909         };
910         memcpy(cur_index, associativity, sizeof(associativity));
911         cur_index += 4;
912     }
913     ret = fdt_setprop(fdt, offset, "ibm,associativity-lookup-arrays", int_buf,
914             (cur_index - int_buf) * sizeof(uint32_t));
915     g_free(int_buf);
916 
917     return ret;
918 }
919 
920 static int spapr_dt_cas_updates(SpaprMachineState *spapr, void *fdt,
921                                 SpaprOptionVector *ov5_updates)
922 {
923     SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
924     int ret = 0, offset;
925 
926     /* Generate ibm,dynamic-reconfiguration-memory node if required */
927     if (spapr_ovec_test(ov5_updates, OV5_DRCONF_MEMORY)) {
928         g_assert(smc->dr_lmb_enabled);
929         ret = spapr_populate_drconf_memory(spapr, fdt);
930         if (ret) {
931             goto out;
932         }
933     }
934 
935     offset = fdt_path_offset(fdt, "/chosen");
936     if (offset < 0) {
937         offset = fdt_add_subnode(fdt, 0, "chosen");
938         if (offset < 0) {
939             return offset;
940         }
941     }
942     ret = spapr_ovec_populate_dt(fdt, offset, spapr->ov5_cas,
943                                  "ibm,architecture-vec-5");
944 
945 out:
946     return ret;
947 }
948 
949 static bool spapr_hotplugged_dev_before_cas(void)
950 {
951     Object *drc_container, *obj;
952     ObjectProperty *prop;
953     ObjectPropertyIterator iter;
954 
955     drc_container = container_get(object_get_root(), "/dr-connector");
956     object_property_iter_init(&iter, drc_container);
957     while ((prop = object_property_iter_next(&iter))) {
958         if (!strstart(prop->type, "link<", NULL)) {
959             continue;
960         }
961         obj = object_property_get_link(drc_container, prop->name, NULL);
962         if (spapr_drc_needed(obj)) {
963             return true;
964         }
965     }
966     return false;
967 }
968 
969 int spapr_h_cas_compose_response(SpaprMachineState *spapr,
970                                  target_ulong addr, target_ulong size,
971                                  SpaprOptionVector *ov5_updates)
972 {
973     void *fdt, *fdt_skel;
974     SpaprDeviceTreeUpdateHeader hdr = { .version_id = 1 };
975 
976     if (spapr_hotplugged_dev_before_cas()) {
977         return 1;
978     }
979 
980     if (size < sizeof(hdr) || size > FW_MAX_SIZE) {
981         error_report("SLOF provided an unexpected CAS buffer size "
982                      TARGET_FMT_lu " (min: %zu, max: %u)",
983                      size, sizeof(hdr), FW_MAX_SIZE);
984         exit(EXIT_FAILURE);
985     }
986 
987     size -= sizeof(hdr);
988 
989     /* Create skeleton */
990     fdt_skel = g_malloc0(size);
991     _FDT((fdt_create(fdt_skel, size)));
992     _FDT((fdt_finish_reservemap(fdt_skel)));
993     _FDT((fdt_begin_node(fdt_skel, "")));
994     _FDT((fdt_end_node(fdt_skel)));
995     _FDT((fdt_finish(fdt_skel)));
996     fdt = g_malloc0(size);
997     _FDT((fdt_open_into(fdt_skel, fdt, size)));
998     g_free(fdt_skel);
999 
1000     /* Fixup cpu nodes */
1001     _FDT((spapr_fixup_cpu_dt(fdt, spapr)));
1002 
1003     if (spapr_dt_cas_updates(spapr, fdt, ov5_updates)) {
1004         return -1;
1005     }
1006 
1007     /* Pack resulting tree */
1008     _FDT((fdt_pack(fdt)));
1009 
1010     if (fdt_totalsize(fdt) + sizeof(hdr) > size) {
1011         trace_spapr_cas_failed(size);
1012         return -1;
1013     }
1014 
1015     cpu_physical_memory_write(addr, &hdr, sizeof(hdr));
1016     cpu_physical_memory_write(addr + sizeof(hdr), fdt, fdt_totalsize(fdt));
1017     trace_spapr_cas_continue(fdt_totalsize(fdt) + sizeof(hdr));
1018     g_free(fdt);
1019 
1020     return 0;
1021 }
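
/*
 * Return value note: 1 is returned when a device was hotplugged before
 * CAS (the caller then triggers a CAS reboot), -1 on failure, and 0 when
 * the updated device tree has been written at @addr.
 */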
1022 
1023 static void spapr_dt_rtas(SpaprMachineState *spapr, void *fdt)
1024 {
1025     int rtas;
1026     GString *hypertas = g_string_sized_new(256);
1027     GString *qemu_hypertas = g_string_sized_new(256);
1028     uint32_t refpoints[] = { cpu_to_be32(0x4), cpu_to_be32(0x4) };
1029     uint64_t max_device_addr = MACHINE(spapr)->device_memory->base +
1030         memory_region_size(&MACHINE(spapr)->device_memory->mr);
1031     uint32_t lrdr_capacity[] = {
1032         cpu_to_be32(max_device_addr >> 32),
1033         cpu_to_be32(max_device_addr & 0xffffffff),
1034         0, cpu_to_be32(SPAPR_MEMORY_BLOCK_SIZE),
1035         cpu_to_be32(max_cpus / smp_threads),
1036     };
1037     uint32_t maxdomains[] = {
1038         cpu_to_be32(4),
1039         cpu_to_be32(0),
1040         cpu_to_be32(0),
1041         cpu_to_be32(0),
1042         cpu_to_be32(nb_numa_nodes ? nb_numa_nodes : 1),
1043     };
1044 
1045     _FDT(rtas = fdt_add_subnode(fdt, 0, "rtas"));
1046 
1047     /* hypertas */
1048     add_str(hypertas, "hcall-pft");
1049     add_str(hypertas, "hcall-term");
1050     add_str(hypertas, "hcall-dabr");
1051     add_str(hypertas, "hcall-interrupt");
1052     add_str(hypertas, "hcall-tce");
1053     add_str(hypertas, "hcall-vio");
1054     add_str(hypertas, "hcall-splpar");
1055     add_str(hypertas, "hcall-bulk");
1056     add_str(hypertas, "hcall-set-mode");
1057     add_str(hypertas, "hcall-sprg0");
1058     add_str(hypertas, "hcall-copy");
1059     add_str(hypertas, "hcall-debug");
1060     add_str(hypertas, "hcall-vphn");
1061     add_str(qemu_hypertas, "hcall-memop1");
1062 
1063     if (!kvm_enabled() || kvmppc_spapr_use_multitce()) {
1064         add_str(hypertas, "hcall-multi-tce");
1065     }
1066 
1067     if (spapr->resize_hpt != SPAPR_RESIZE_HPT_DISABLED) {
1068         add_str(hypertas, "hcall-hpt-resize");
1069     }
1070 
1071     _FDT(fdt_setprop(fdt, rtas, "ibm,hypertas-functions",
1072                      hypertas->str, hypertas->len));
1073     g_string_free(hypertas, TRUE);
1074     _FDT(fdt_setprop(fdt, rtas, "qemu,hypertas-functions",
1075                      qemu_hypertas->str, qemu_hypertas->len));
1076     g_string_free(qemu_hypertas, TRUE);
1077 
1078     _FDT(fdt_setprop(fdt, rtas, "ibm,associativity-reference-points",
1079                      refpoints, sizeof(refpoints)));
1080 
1081     _FDT(fdt_setprop(fdt, rtas, "ibm,max-associativity-domains",
1082                      maxdomains, sizeof(maxdomains)));
1083 
1084     _FDT(fdt_setprop_cell(fdt, rtas, "rtas-error-log-max",
1085                           RTAS_ERROR_LOG_MAX));
1086     _FDT(fdt_setprop_cell(fdt, rtas, "rtas-event-scan-rate",
1087                           RTAS_EVENT_SCAN_RATE));
1088 
1089     g_assert(msi_nonbroken);
1090     _FDT(fdt_setprop(fdt, rtas, "ibm,change-msix-capable", NULL, 0));
1091 
1092     /*
1093      * According to PAPR, the RTAS ibm,os-term call does not guarantee a
1094      * return back to the guest CPU.
1095      *
1096      * However, the additional ibm,extended-os-term property indicates
1097      * that the RTAS call will always return. Set this property.
1098      */
1099     _FDT(fdt_setprop(fdt, rtas, "ibm,extended-os-term", NULL, 0));
1100 
1101     _FDT(fdt_setprop(fdt, rtas, "ibm,lrdr-capacity",
1102                      lrdr_capacity, sizeof(lrdr_capacity)));
1103 
1104     spapr_dt_rtas_tokens(fdt, rtas);
1105 }
1106 
1107 /*
1108  * Prepare ibm,arch-vec-5-platform-support, which indicates the MMU
1109  * and the XIVE features that the guest may request and thus the valid
1110  * values for bytes 23..26 of option vector 5:
1111  */
1112 static void spapr_dt_ov5_platform_support(SpaprMachineState *spapr, void *fdt,
1113                                           int chosen)
1114 {
1115     PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu);
1116 
1117     char val[2 * 4] = {
1118         23, spapr->irq->ov5, /* Xive mode. */
1119         24, 0x00, /* Hash/Radix, filled in below. */
1120         25, 0x00, /* Hash options: Segment Tables == no, GTSE == no. */
1121         26, 0x40, /* Radix options: GTSE == yes. */
1122     };
1123 
1124     if (!ppc_check_compat(first_ppc_cpu, CPU_POWERPC_LOGICAL_3_00, 0,
1125                           first_ppc_cpu->compat_pvr)) {
1126         /*
1127          * If we're in a pre-POWER9 compat mode then the guest should
1128          * use hash and the legacy interrupt mode.
1129          */
1130         val[1] = 0x00; /* XICS */
1131         val[3] = 0x00; /* Hash */
1132     } else if (kvm_enabled()) {
1133         if (kvmppc_has_cap_mmu_radix() && kvmppc_has_cap_mmu_hash_v3()) {
1134             val[3] = 0x80; /* OV5_MMU_BOTH */
1135         } else if (kvmppc_has_cap_mmu_radix()) {
1136             val[3] = 0x40; /* OV5_MMU_RADIX_300 */
1137         } else {
1138             val[3] = 0x00; /* Hash */
1139         }
1140     } else {
1141         /* The V3 MMU supports both hash and radix in TCG (with dynamic switching) */
1142         val[3] = 0xC0;
1143     }
1144     _FDT(fdt_setprop(fdt, chosen, "ibm,arch-vec-5-platform-support",
1145                      val, sizeof(val)));
1146 }
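
/*
 * The property built above is a list of (OV5 byte number, supported
 * values) pairs. E.g. under TCG with a POWER9 (or later) compat CPU it
 * reads { 23, <interrupt mode>, 24, 0xC0, 25, 0x00, 26, 0x40 }: both
 * hash and radix MMUs, with GTSE for radix only.
 */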
1147 
1148 static void spapr_dt_chosen(SpaprMachineState *spapr, void *fdt)
1149 {
1150     MachineState *machine = MACHINE(spapr);
1151     int chosen;
1152     const char *boot_device = machine->boot_order;
1153     char *stdout_path = spapr_vio_stdout_path(spapr->vio_bus);
1154     size_t cb = 0;
1155     char *bootlist = get_boot_devices_list(&cb);
1156 
1157     _FDT(chosen = fdt_add_subnode(fdt, 0, "chosen"));
1158 
1159     _FDT(fdt_setprop_string(fdt, chosen, "bootargs", machine->kernel_cmdline));
1160     _FDT(fdt_setprop_cell(fdt, chosen, "linux,initrd-start",
1161                           spapr->initrd_base));
1162     _FDT(fdt_setprop_cell(fdt, chosen, "linux,initrd-end",
1163                           spapr->initrd_base + spapr->initrd_size));
1164 
1165     if (spapr->kernel_size) {
1166         uint64_t kprop[2] = { cpu_to_be64(KERNEL_LOAD_ADDR),
1167                               cpu_to_be64(spapr->kernel_size) };
1168 
1169         _FDT(fdt_setprop(fdt, chosen, "qemu,boot-kernel",
1170                          &kprop, sizeof(kprop)));
1171         if (spapr->kernel_le) {
1172             _FDT(fdt_setprop(fdt, chosen, "qemu,boot-kernel-le", NULL, 0));
1173         }
1174     }
1175     if (boot_menu) {
1176         _FDT((fdt_setprop_cell(fdt, chosen, "qemu,boot-menu", boot_menu)));
1177     }
1178     _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-width", graphic_width));
1179     _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-height", graphic_height));
1180     _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-depth", graphic_depth));
1181 
1182     if (cb && bootlist) {
1183         int i;
1184 
1185         for (i = 0; i < cb; i++) {
1186             if (bootlist[i] == '\n') {
1187                 bootlist[i] = ' ';
1188             }
1189         }
1190         _FDT(fdt_setprop_string(fdt, chosen, "qemu,boot-list", bootlist));
1191     }
1192 
1193     if (boot_device && strlen(boot_device)) {
1194         _FDT(fdt_setprop_string(fdt, chosen, "qemu,boot-device", boot_device));
1195     }
1196 
1197     if (!spapr->has_graphics && stdout_path) {
1198         /*
1199          * The "linux,stdout-path" and "stdout" properties are deprecated by the
1200          * Linux kernel. New platforms should only use the "stdout-path" property.
1201          * Set the new property and continue using the older property to remain
1202          * compatible with the existing firmware.
1203          */
1204         _FDT(fdt_setprop_string(fdt, chosen, "linux,stdout-path", stdout_path));
1205         _FDT(fdt_setprop_string(fdt, chosen, "stdout-path", stdout_path));
1206     }
1207 
1208     spapr_dt_ov5_platform_support(spapr, fdt, chosen);
1209 
1210     g_free(stdout_path);
1211     g_free(bootlist);
1212 }
1213 
1214 static void spapr_dt_hypervisor(SpaprMachineState *spapr, void *fdt)
1215 {
1216     /* The /hypervisor node isn't in PAPR - this is a hack to allow PR
1217      * KVM to work under pHyp with some guest co-operation */
1218     int hypervisor;
1219     uint8_t hypercall[16];
1220 
1221     _FDT(hypervisor = fdt_add_subnode(fdt, 0, "hypervisor"));
1222     /* indicate KVM hypercall interface */
1223     _FDT(fdt_setprop_string(fdt, hypervisor, "compatible", "linux,kvm"));
1224     if (kvmppc_has_cap_fixup_hcalls()) {
1225         /*
1226          * Older KVM versions with older guest kernels were broken
1227          * with the magic page, so don't allow the guest to map it.
1228          */
1229         if (!kvmppc_get_hypercall(first_cpu->env_ptr, hypercall,
1230                                   sizeof(hypercall))) {
1231             _FDT(fdt_setprop(fdt, hypervisor, "hcall-instructions",
1232                              hypercall, sizeof(hypercall)));
1233         }
1234     }
1235 }
1236 
1237 static void *spapr_build_fdt(SpaprMachineState *spapr)
1238 {
1239     MachineState *machine = MACHINE(spapr);
1240     MachineClass *mc = MACHINE_GET_CLASS(machine);
1241     SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);
1242     int ret;
1243     void *fdt;
1244     SpaprPhbState *phb;
1245     char *buf;
1246 
1247     fdt = g_malloc0(FDT_MAX_SIZE);
1248     _FDT((fdt_create_empty_tree(fdt, FDT_MAX_SIZE)));
1249 
1250     /* Root node */
1251     _FDT(fdt_setprop_string(fdt, 0, "device_type", "chrp"));
1252     _FDT(fdt_setprop_string(fdt, 0, "model", "IBM pSeries (emulated by qemu)"));
1253     _FDT(fdt_setprop_string(fdt, 0, "compatible", "qemu,pseries"));
1254 
1255     /*
1256      * Add info to let the guest identify which host it is running on
1257      * and what the UUID of the guest is
1258      */
1259     if (spapr->host_model && !g_str_equal(spapr->host_model, "none")) {
1260         if (g_str_equal(spapr->host_model, "passthrough")) {
1261             /* -M host-model=passthrough */
1262             if (kvmppc_get_host_model(&buf)) {
1263                 _FDT(fdt_setprop_string(fdt, 0, "host-model", buf));
1264                 g_free(buf);
1265             }
1266         } else {
1267             /* -M host-model=<user-string> */
1268             _FDT(fdt_setprop_string(fdt, 0, "host-model", spapr->host_model));
1269         }
1270     }
1271 
1272     if (spapr->host_serial && !g_str_equal(spapr->host_serial, "none")) {
1273         if (g_str_equal(spapr->host_serial, "passthrough")) {
1274             /* -M host-serial=passthrough */
1275             if (kvmppc_get_host_serial(&buf)) {
1276                 _FDT(fdt_setprop_string(fdt, 0, "host-serial", buf));
1277                 g_free(buf);
1278             }
1279         } else {
1280             /* -M host-serial=<user-string> */
1281             _FDT(fdt_setprop_string(fdt, 0, "host-serial", spapr->host_serial));
1282         }
1283     }
1284 
1285     buf = qemu_uuid_unparse_strdup(&qemu_uuid);
1286 
1287     _FDT(fdt_setprop_string(fdt, 0, "vm,uuid", buf));
1288     if (qemu_uuid_set) {
1289         _FDT(fdt_setprop_string(fdt, 0, "system-id", buf));
1290     }
1291     g_free(buf);
1292 
1293     if (qemu_get_vm_name()) {
1294         _FDT(fdt_setprop_string(fdt, 0, "ibm,partition-name",
1295                                 qemu_get_vm_name()));
1296     }
1297 
1298     _FDT(fdt_setprop_cell(fdt, 0, "#address-cells", 2));
1299     _FDT(fdt_setprop_cell(fdt, 0, "#size-cells", 2));
1300 
1301     /* /interrupt controller */
1302     spapr->irq->dt_populate(spapr, spapr_max_server_number(spapr), fdt,
1303                           PHANDLE_INTC);
1304 
1305     ret = spapr_populate_memory(spapr, fdt);
1306     if (ret < 0) {
1307         error_report("couldn't setup memory nodes in fdt");
1308         exit(1);
1309     }
1310 
1311     /* /vdevice */
1312     spapr_dt_vdevice(spapr->vio_bus, fdt);
1313 
1314     if (object_resolve_path_type("", TYPE_SPAPR_RNG, NULL)) {
1315         ret = spapr_rng_populate_dt(fdt);
1316         if (ret < 0) {
1317             error_report("could not set up rng device in the fdt");
1318             exit(1);
1319         }
1320     }
1321 
1322     QLIST_FOREACH(phb, &spapr->phbs, list) {
1323         ret = spapr_populate_pci_dt(phb, PHANDLE_INTC, fdt,
1324                                     spapr->irq->nr_msis, NULL);
1325         if (ret < 0) {
1326             error_report("couldn't setup PCI devices in fdt");
1327             exit(1);
1328         }
1329     }
1330 
1331     /* cpus */
1332     spapr_populate_cpus_dt_node(fdt, spapr);
1333 
1334     if (smc->dr_lmb_enabled) {
1335         _FDT(spapr_drc_populate_dt(fdt, 0, NULL, SPAPR_DR_CONNECTOR_TYPE_LMB));
1336     }
1337 
1338     if (mc->has_hotpluggable_cpus) {
1339         int offset = fdt_path_offset(fdt, "/cpus");
1340         ret = spapr_drc_populate_dt(fdt, offset, NULL,
1341                                     SPAPR_DR_CONNECTOR_TYPE_CPU);
1342         if (ret < 0) {
1343             error_report("Couldn't set up CPU DR device tree properties");
1344             exit(1);
1345         }
1346     }
1347 
1348     /* /event-sources */
1349     spapr_dt_events(spapr, fdt);
1350 
1351     /* /rtas */
1352     spapr_dt_rtas(spapr, fdt);
1353 
1354     /* /chosen */
1355     spapr_dt_chosen(spapr, fdt);
1356 
1357     /* /hypervisor */
1358     if (kvm_enabled()) {
1359         spapr_dt_hypervisor(spapr, fdt);
1360     }
1361 
1362     /* Build memory reserve map */
1363     if (spapr->kernel_size) {
1364         _FDT((fdt_add_mem_rsv(fdt, KERNEL_LOAD_ADDR, spapr->kernel_size)));
1365     }
1366     if (spapr->initrd_size) {
1367         _FDT((fdt_add_mem_rsv(fdt, spapr->initrd_base, spapr->initrd_size)));
1368     }
1369 
1370     /* ibm,client-architecture-support updates */
1371     ret = spapr_dt_cas_updates(spapr, fdt, spapr->ov5_cas);
1372     if (ret < 0) {
1373         error_report("couldn't setup CAS properties fdt");
1374         exit(1);
1375     }
1376 
1377     if (smc->dr_phb_enabled) {
1378         ret = spapr_drc_populate_dt(fdt, 0, NULL, SPAPR_DR_CONNECTOR_TYPE_PHB);
1379         if (ret < 0) {
1380             error_report("Couldn't set up PHB DR device tree properties");
1381             exit(1);
1382         }
1383     }
1384 
1385     return fdt;
1386 }
1387 
1388 static uint64_t translate_kernel_address(void *opaque, uint64_t addr)
1389 {
1390     return (addr & 0x0fffffff) + KERNEL_LOAD_ADDR;
1391 }
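
/*
 * The mask above strips the kernel's link-time high address bits, so
 * e.g. a program header address of 0x2000000 ends up at
 * 0x2000000 + KERNEL_LOAD_ADDR in guest RAM.
 */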
1392 
1393 static void emulate_spapr_hypercall(PPCVirtualHypervisor *vhyp,
1394                                     PowerPCCPU *cpu)
1395 {
1396     CPUPPCState *env = &cpu->env;
1397 
1398     /* The TCG path should also be holding the BQL at this point */
1399     g_assert(qemu_mutex_iothread_locked());
1400 
1401     if (msr_pr) {
1402         hcall_dprintf("Hypercall made with MSR[PR]=1\n");
1403         env->gpr[3] = H_PRIVILEGE;
1404     } else {
1405         env->gpr[3] = spapr_hypercall(cpu, env->gpr[3], &env->gpr[4]);
1406     }
1407 }
1408 
1409 struct LPCRSyncState {
1410     target_ulong value;
1411     target_ulong mask;
1412 };
1413 
1414 static void do_lpcr_sync(CPUState *cs, run_on_cpu_data arg)
1415 {
1416     struct LPCRSyncState *s = arg.host_ptr;
1417     PowerPCCPU *cpu = POWERPC_CPU(cs);
1418     CPUPPCState *env = &cpu->env;
1419     target_ulong lpcr;
1420 
1421     cpu_synchronize_state(cs);
1422     lpcr = env->spr[SPR_LPCR];
1423     lpcr &= ~s->mask;
1424     lpcr |= s->value;
1425     ppc_store_lpcr(cpu, lpcr);
1426 }
1427 
1428 void spapr_set_all_lpcrs(target_ulong value, target_ulong mask)
1429 {
1430     CPUState *cs;
1431     struct LPCRSyncState s = {
1432         .value = value,
1433         .mask = mask
1434     };
1435     CPU_FOREACH(cs) {
1436         run_on_cpu(cs, do_lpcr_sync, RUN_ON_CPU_HOST_PTR(&s));
1437     }
1438 }
1439 
1440 static void spapr_get_pate(PPCVirtualHypervisor *vhyp, ppc_v3_pate_t *entry)
1441 {
1442     SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);
1443 
1444     /* Copy PATE1:GR into PATE0:HR */
1445     entry->dw0 = spapr->patb_entry & PATE0_HR;
1446     entry->dw1 = spapr->patb_entry;
1447 }
1448 
1449 #define HPTE(_table, _i)   (void *)(((uint64_t *)(_table)) + ((_i) * 2))
1450 #define HPTE_VALID(_hpte)  (tswap64(*((uint64_t *)(_hpte))) & HPTE64_V_VALID)
1451 #define HPTE_DIRTY(_hpte)  (tswap64(*((uint64_t *)(_hpte))) & HPTE64_V_HPTE_DIRTY)
1452 #define CLEAN_HPTE(_hpte)  ((*(uint64_t *)(_hpte)) &= tswap64(~HPTE64_V_HPTE_DIRTY))
1453 #define DIRTY_HPTE(_hpte)  ((*(uint64_t *)(_hpte)) |= tswap64(HPTE64_V_HPTE_DIRTY))
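
/*
 * These helpers treat the HPT as an array of 16-byte HPTEs (two 64-bit
 * doublewords each). HPTE64_V_HPTE_DIRTY is a QEMU software-only bit in
 * the first doubleword, used by the HPT migration code to track entries
 * that still need to be transmitted.
 */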
1454 
1455 /*
1456  * Get the fd to access the kernel htab, re-opening it if necessary
1457  */
1458 static int get_htab_fd(SpaprMachineState *spapr)
1459 {
1460     Error *local_err = NULL;
1461 
1462     if (spapr->htab_fd >= 0) {
1463         return spapr->htab_fd;
1464     }
1465 
1466     spapr->htab_fd = kvmppc_get_htab_fd(false, 0, &local_err);
1467     if (spapr->htab_fd < 0) {
1468         error_report_err(local_err);
1469     }
1470 
1471     return spapr->htab_fd;
1472 }
1473 
1474 void close_htab_fd(SpaprMachineState *spapr)
1475 {
1476     if (spapr->htab_fd >= 0) {
1477         close(spapr->htab_fd);
1478     }
1479     spapr->htab_fd = -1;
1480 }
1481 
1482 static hwaddr spapr_hpt_mask(PPCVirtualHypervisor *vhyp)
1483 {
1484     SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);
1485 
1486     return HTAB_SIZE(spapr) / HASH_PTEG_SIZE_64 - 1;
1487 }
1488 
1489 static target_ulong spapr_encode_hpt_for_kvm_pr(PPCVirtualHypervisor *vhyp)
1490 {
1491     SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);
1492 
1493     assert(kvm_enabled());
1494 
1495     if (!spapr->htab) {
1496         return 0;
1497     }
1498 
1499     return (target_ulong)(uintptr_t)spapr->htab | (spapr->htab_shift - 18);
1500 }
1501 
1502 static const ppc_hash_pte64_t *spapr_map_hptes(PPCVirtualHypervisor *vhyp,
1503                                                 hwaddr ptex, int n)
1504 {
1505     SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);
1506     hwaddr pte_offset = ptex * HASH_PTE_SIZE_64;
1507 
1508     if (!spapr->htab) {
1509         /*
1510          * HTAB is controlled by KVM. Fetch into temporary buffer
1511          */
1512         ppc_hash_pte64_t *hptes = g_malloc(n * HASH_PTE_SIZE_64);
1513         kvmppc_read_hptes(hptes, ptex, n);
1514         return hptes;
1515     }
1516 
1517     /*
1518      * HTAB is controlled by QEMU. Just point to the internally
1519      * accessible PTEG.
1520      */
1521     return (const ppc_hash_pte64_t *)(spapr->htab + pte_offset);
1522 }
1523 
1524 static void spapr_unmap_hptes(PPCVirtualHypervisor *vhyp,
1525                               const ppc_hash_pte64_t *hptes,
1526                               hwaddr ptex, int n)
1527 {
1528     SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);
1529 
1530     if (!spapr->htab) {
1531         g_free((void *)hptes);
1532     }
1533 
1534     /* Nothing to do for a QEMU-managed HPT */
1535 }
1536 
1537 static void spapr_store_hpte(PPCVirtualHypervisor *vhyp, hwaddr ptex,
1538                              uint64_t pte0, uint64_t pte1)
1539 {
1540     SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);
1541     hwaddr offset = ptex * HASH_PTE_SIZE_64;
1542 
1543     if (!spapr->htab) {
1544         kvmppc_write_hpte(ptex, pte0, pte1);
1545     } else {
1546         if (pte0 & HPTE64_V_VALID) {
1547             stq_p(spapr->htab + offset + HASH_PTE_SIZE_64 / 2, pte1);
1548             /*
1549              * When setting valid, we write PTE1 first. This ensures
1550              * proper synchronization with the reading code in
1551              * ppc_hash64_pteg_search()
1552              */
1553             smp_wmb();
1554             stq_p(spapr->htab + offset, pte0);
1555         } else {
1556             stq_p(spapr->htab + offset, pte0);
1557             /*
1558              * When clearing it we set PTE0 first. This ensures proper
1559              * synchronization with the reading code in
1560              * ppc_hash64_pteg_search()
1561              */
1562             smp_wmb();
1563             stq_p(spapr->htab + offset + HASH_PTE_SIZE_64 / 2, pte1);
1564         }
1565     }
1566 }
1567 
1568 int spapr_hpt_shift_for_ramsize(uint64_t ramsize)
1569 {
1570     int shift;
1571 
1572     /* We aim for a hash table of size 1/128 the size of RAM (rounded
1573      * up).  The PAPR recommendation is actually 1/64 of RAM size, but
1574      * that's much more than is needed for Linux guests */
1575     shift = ctz64(pow2ceil(ramsize)) - 7;
1576     shift = MAX(shift, 18); /* Minimum architected size */
1577     shift = MIN(shift, 46); /* Maximum architected size */
1578     return shift;
1579 }
1580 
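/*
 * Editor's worked examples for spapr_hpt_shift_for_ramsize() above:
 *   - 1 GiB (2^30):  ctz64(pow2ceil(2^30)) - 7 = 23, so an 8 MiB HPT,
 *     i.e. 1/128 of RAM as intended.
 *   - 1.5 GiB: pow2ceil() rounds up to 2^31, giving shift 24 (16 MiB).
 *   - 16 MiB (2^24): 24 - 7 = 17, clamped up to the architected minimum
 *     of 18 (a 256 KiB HPT).
 */
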
1581 void spapr_free_hpt(SpaprMachineState *spapr)
1582 {
1583     g_free(spapr->htab);
1584     spapr->htab = NULL;
1585     spapr->htab_shift = 0;
1586     close_htab_fd(spapr);
1587 }
1588 
1589 void spapr_reallocate_hpt(SpaprMachineState *spapr, int shift,
1590                           Error **errp)
1591 {
1592     long rc;
1593 
1594     /* Clean up any HPT info from a previous boot */
1595     spapr_free_hpt(spapr);
1596 
1597     rc = kvmppc_reset_htab(shift);
1598     if (rc < 0) {
1599         /* kernel-side HPT needed, but couldn't allocate one */
1600         error_setg_errno(errp, errno,
1601                          "Failed to allocate KVM HPT of order %d (try smaller maxmem?)",
1602                          shift);
1603         /* This is almost certainly fatal, but if the caller really
1604          * wants to carry on with shift == 0, it's welcome to try */
1605     } else if (rc > 0) {
1606         /* kernel-side HPT allocated */
1607         if (rc != shift) {
1608             error_setg(errp,
1609                        "Requested order %d HPT, but kernel allocated order %ld (try smaller maxmem?)",
1610                        shift, rc);
1611         }
1612 
1613         spapr->htab_shift = shift;
1614         spapr->htab = NULL;
1615     } else {
1616         /* kernel-side HPT not needed, allocate in userspace instead */
1617         size_t size = 1ULL << shift;
1618         int i;
1619 
1620         spapr->htab = qemu_memalign(size, size);
1621         if (!spapr->htab) {
1622             error_setg_errno(errp, errno,
1623                              "Could not allocate HPT of order %d", shift);
1624             return;
1625         }
1626 
1627         memset(spapr->htab, 0, size);
1628         spapr->htab_shift = shift;
1629 
1630         for (i = 0; i < size / HASH_PTE_SIZE_64; i++) {
1631             DIRTY_HPTE(HPTE(spapr->htab, i));
1632         }
1633     }
1634     /* We're setting up a hash table, so that means we're not radix */
1635     spapr->patb_entry = 0;
1636     spapr_set_all_lpcrs(0, LPCR_HR | LPCR_UPRT);
1637 }
1638 
1639 void spapr_setup_hpt_and_vrma(SpaprMachineState *spapr)
1640 {
1641     int hpt_shift;
1642 
1643     if ((spapr->resize_hpt == SPAPR_RESIZE_HPT_DISABLED)
1644         || (spapr->cas_reboot
1645             && !spapr_ovec_test(spapr->ov5_cas, OV5_HPT_RESIZE))) {
1646         hpt_shift = spapr_hpt_shift_for_ramsize(MACHINE(spapr)->maxram_size);
1647     } else {
1648         uint64_t current_ram_size;
1649 
1650         current_ram_size = MACHINE(spapr)->ram_size + get_plugged_memory_size();
1651         hpt_shift = spapr_hpt_shift_for_ramsize(current_ram_size);
1652     }
1653     spapr_reallocate_hpt(spapr, hpt_shift, &error_fatal);
1654 
1655     if (spapr->vrma_adjust) {
1656         spapr->rma_size = kvmppc_rma_size(spapr_node0_size(MACHINE(spapr)),
1657                                           spapr->htab_shift);
1658     }
1659 }
1660 
1661 static int spapr_reset_drcs(Object *child, void *opaque)
1662 {
1663     SpaprDrc *drc =
1664         (SpaprDrc *) object_dynamic_cast(child,
1665                                          TYPE_SPAPR_DR_CONNECTOR);
1666 
1667     if (drc) {
1668         spapr_drc_reset(drc);
1669     }
1670 
1671     return 0;
1672 }
1673 
1674 static void spapr_machine_reset(void)
1675 {
1676     MachineState *machine = MACHINE(qdev_get_machine());
1677     SpaprMachineState *spapr = SPAPR_MACHINE(machine);
1678     PowerPCCPU *first_ppc_cpu;
1679     uint32_t rtas_limit;
1680     hwaddr rtas_addr, fdt_addr;
1681     void *fdt;
1682     int rc;
1683 
1684     spapr_caps_apply(spapr);
1685 
1686     first_ppc_cpu = POWERPC_CPU(first_cpu);
1687     if (kvm_enabled() && kvmppc_has_cap_mmu_radix() &&
1688         ppc_type_check_compat(machine->cpu_type, CPU_POWERPC_LOGICAL_3_00, 0,
1689                               spapr->max_compat_pvr)) {
1690         /*
1691          * If using KVM with radix mode available, VCPUs can be started
1692          * without a HPT because KVM will start them in radix mode.
1693          * Set the GR bit in PATE so that we know there is no HPT.
1694          */
1695         spapr->patb_entry = PATE1_GR;
1696         spapr_set_all_lpcrs(LPCR_HR | LPCR_UPRT, LPCR_HR | LPCR_UPRT);
1697     } else {
1698         spapr_setup_hpt_and_vrma(spapr);
1699     }
1700 
1701     /*
1702      * If this reset wasn't generated by CAS, we should reset our
1703      * negotiated options and start from scratch
1704      */
1705     if (!spapr->cas_reboot) {
1706         spapr_ovec_cleanup(spapr->ov5_cas);
1707         spapr->ov5_cas = spapr_ovec_new();
1708 
1709         ppc_set_compat(first_ppc_cpu, spapr->max_compat_pvr, &error_fatal);
1710     }
1711 
1712     if (!SPAPR_MACHINE_GET_CLASS(spapr)->legacy_irq_allocation) {
1713         spapr_irq_msi_reset(spapr);
1714     }
1715 
1716     qemu_devices_reset();
1717 
1718     /*
1719      * This fixes up some of the default configuration of the XIVE
1720      * devices. It must be called after the machine devices have been reset.
1721      */
1722     spapr_irq_reset(spapr, &error_fatal);
1723 
1724     /*
1725      * There is no CAS under qtest. Simulate one to please the code that
1726      * depends on spapr->ov5_cas. This is especially needed to test device
1727      * unplug, so we do that before resetting the DRCs.
1728      */
1729     if (qtest_enabled()) {
1730         spapr_ovec_cleanup(spapr->ov5_cas);
1731         spapr->ov5_cas = spapr_ovec_clone(spapr->ov5);
1732     }
1733 
1734     /* DRC reset may cause a device to be unplugged. This will cause trouble
1735      * if this device is used by another device (e.g., a running vhost backend
1736      * will crash QEMU if the DIMM holding the vring goes away). To avoid such
1737      * situations, we reset DRCs after all devices have been reset.
1738      */
1739     object_child_foreach_recursive(object_get_root(), spapr_reset_drcs, NULL);
1740 
1741     spapr_clear_pending_events(spapr);
1742 
1743     /*
1744      * We place the device tree and RTAS just below either the top of the RMA,
1745      * or just below 2GB, whichever is lower, so that they can be
1746      * processed with 32-bit real-mode code if necessary
1747      */
1748     rtas_limit = MIN(spapr->rma_size, RTAS_MAX_ADDR);
1749     rtas_addr = rtas_limit - RTAS_MAX_SIZE;
1750     fdt_addr = rtas_addr - FDT_MAX_SIZE;
1751 
1752     fdt = spapr_build_fdt(spapr);
1753 
1754     spapr_load_rtas(spapr, fdt, rtas_addr);
1755 
1756     rc = fdt_pack(fdt);
1757 
1758     /* Should only fail if we've built a corrupted tree */
1759     assert(rc == 0);
1760 
1761     if (fdt_totalsize(fdt) > FDT_MAX_SIZE) {
1762         error_report("FDT too big ! 0x%x bytes (max is 0x%x)",
1763                      fdt_totalsize(fdt), FDT_MAX_SIZE);
1764         exit(1);
1765     }
1766 
1767     /* Load the fdt */
1768     qemu_fdt_dumpdtb(fdt, fdt_totalsize(fdt));
1769     cpu_physical_memory_write(fdt_addr, fdt, fdt_totalsize(fdt));
1770     g_free(spapr->fdt_blob);
1771     spapr->fdt_size = fdt_totalsize(fdt);
1772     spapr->fdt_initial_size = spapr->fdt_size;
1773     spapr->fdt_blob = fdt;
1774 
1775     /* Set up the entry state */
1776     spapr_cpu_set_entry_state(first_ppc_cpu, SPAPR_ENTRY_POINT, fdt_addr);
1777     first_ppc_cpu->env.gpr[5] = 0;
1778 
1779     spapr->cas_reboot = false;
1780 }
1781 
1782 static void spapr_create_nvram(SpaprMachineState *spapr)
1783 {
1784     DeviceState *dev = qdev_create(&spapr->vio_bus->bus, "spapr-nvram");
1785     DriveInfo *dinfo = drive_get(IF_PFLASH, 0, 0);
1786 
1787     if (dinfo) {
1788         qdev_prop_set_drive(dev, "drive", blk_by_legacy_dinfo(dinfo),
1789                             &error_fatal);
1790     }
1791 
1792     qdev_init_nofail(dev);
1793 
1794     spapr->nvram = (struct SpaprNvram *)dev;
1795 }
1796 
1797 static void spapr_rtc_create(SpaprMachineState *spapr)
1798 {
1799     object_initialize_child(OBJECT(spapr), "rtc",
1800                             &spapr->rtc, sizeof(spapr->rtc), TYPE_SPAPR_RTC,
1801                             &error_fatal, NULL);
1802     object_property_set_bool(OBJECT(&spapr->rtc), true, "realized",
1803                               &error_fatal);
1804     object_property_add_alias(OBJECT(spapr), "rtc-time", OBJECT(&spapr->rtc),
1805                               "date", &error_fatal);
1806 }
1807 
1808 /* Returns whether we want to use VGA or not */
1809 static bool spapr_vga_init(PCIBus *pci_bus, Error **errp)
1810 {
1811     switch (vga_interface_type) {
1812     case VGA_NONE:
1813         return false;
1814     case VGA_DEVICE:
1815         return true;
1816     case VGA_STD:
1817     case VGA_VIRTIO:
1818     case VGA_CIRRUS:
1819         return pci_vga_init(pci_bus) != NULL;
1820     default:
1821         error_setg(errp,
1822                    "Unsupported VGA mode, only -vga std or -vga virtio is supported");
1823         return false;
1824     }
1825 }
1826 
1827 static int spapr_pre_load(void *opaque)
1828 {
1829     int rc;
1830 
1831     rc = spapr_caps_pre_load(opaque);
1832     if (rc) {
1833         return rc;
1834     }
1835 
1836     return 0;
1837 }
1838 
1839 static int spapr_post_load(void *opaque, int version_id)
1840 {
1841     SpaprMachineState *spapr = (SpaprMachineState *)opaque;
1842     int err = 0;
1843 
1844     err = spapr_caps_post_migration(spapr);
1845     if (err) {
1846         return err;
1847     }
1848 
1849     /*
1850      * In earlier versions, there was no separate qdev for the PAPR
1851      * RTC, so the RTC offset was stored directly in sPAPREnvironment.
1852      * So when migrating from those versions, poke the incoming offset
1853      * value into the RTC device
1854      */
1855     if (version_id < 3) {
1856         err = spapr_rtc_import_offset(&spapr->rtc, spapr->rtc_offset);
1857         if (err) {
1858             return err;
1859         }
1860     }
1861 
1862     if (kvm_enabled() && spapr->patb_entry) {
1863         PowerPCCPU *cpu = POWERPC_CPU(first_cpu);
1864         bool radix = !!(spapr->patb_entry & PATE1_GR);
1865         bool gtse = !!(cpu->env.spr[SPR_LPCR] & LPCR_GTSE);
1866 
1867         /*
1868          * Update LPCR:HR and UPRT as they may not be set properly in
1869          * the stream
1870          */
1871         spapr_set_all_lpcrs(radix ? (LPCR_HR | LPCR_UPRT) : 0,
1872                             LPCR_HR | LPCR_UPRT);
1873 
1874         err = kvmppc_configure_v3_mmu(cpu, radix, gtse, spapr->patb_entry);
1875         if (err) {
1876             error_report("Process table config unsupported by the host");
1877             return -EINVAL;
1878         }
1879     }
1880 
1881     err = spapr_irq_post_load(spapr, version_id);
1882     if (err) {
1883         return err;
1884     }
1885 
1886     return err;
1887 }
1888 
1889 static int spapr_pre_save(void *opaque)
1890 {
1891     int rc;
1892 
1893     rc = spapr_caps_pre_save(opaque);
1894     if (rc) {
1895         return rc;
1896     }
1897 
1898     return 0;
1899 }
1900 
1901 static bool version_before_3(void *opaque, int version_id)
1902 {
1903     return version_id < 3;
1904 }
1905 
1906 static bool spapr_pending_events_needed(void *opaque)
1907 {
1908     SpaprMachineState *spapr = (SpaprMachineState *)opaque;
1909     return !QTAILQ_EMPTY(&spapr->pending_events);
1910 }
1911 
1912 static const VMStateDescription vmstate_spapr_event_entry = {
1913     .name = "spapr_event_log_entry",
1914     .version_id = 1,
1915     .minimum_version_id = 1,
1916     .fields = (VMStateField[]) {
1917         VMSTATE_UINT32(summary, SpaprEventLogEntry),
1918         VMSTATE_UINT32(extended_length, SpaprEventLogEntry),
1919         VMSTATE_VBUFFER_ALLOC_UINT32(extended_log, SpaprEventLogEntry, 0,
1920                                      NULL, extended_length),
1921         VMSTATE_END_OF_LIST()
1922     },
1923 };
1924 
1925 static const VMStateDescription vmstate_spapr_pending_events = {
1926     .name = "spapr_pending_events",
1927     .version_id = 1,
1928     .minimum_version_id = 1,
1929     .needed = spapr_pending_events_needed,
1930     .fields = (VMStateField[]) {
1931         VMSTATE_QTAILQ_V(pending_events, SpaprMachineState, 1,
1932                          vmstate_spapr_event_entry, SpaprEventLogEntry, next),
1933         VMSTATE_END_OF_LIST()
1934     },
1935 };
1936 
1937 static bool spapr_ov5_cas_needed(void *opaque)
1938 {
1939     SpaprMachineState *spapr = opaque;
1940     SpaprOptionVector *ov5_mask = spapr_ovec_new();
1941     SpaprOptionVector *ov5_legacy = spapr_ovec_new();
1942     SpaprOptionVector *ov5_removed = spapr_ovec_new();
1943     bool cas_needed;
1944 
1945     /* Prior to the introduction of SpaprOptionVector, we had two option
1946      * vectors we dealt with: OV5_FORM1_AFFINITY, and OV5_DRCONF_MEMORY.
1947      * Both of these options encode machine topology into the device-tree
1948      * in such a way that the now-booted OS should still be able to interact
1949      * appropriately with QEMU regardless of what options were actually
1950      * negotiated on the source side.
1951      *
1952      * As such, we can avoid migrating the CAS-negotiated options if these
1953      * are the only options available on the current machine/platform.
1954      * Since these are the only options available for pseries-2.7 and
1955      * earlier, this allows us to maintain old->new/new->old migration
1956      * compatibility.
1957      *
1958      * For QEMU 2.8+, there are additional CAS-negotiable options available
1959      * via default pseries-2.8 machines and explicit command-line parameters.
1960      * Some of these options, like OV5_HP_EVT, *do* require QEMU to be aware
1961      * of the actual CAS-negotiated values to continue working properly. For
1962      * example, availability of memory unplug depends on knowing whether
1963      * OV5_HP_EVT was negotiated via CAS.
1964      *
1965      * Thus, for any cases where the set of available CAS-negotiable
1966      * options extends beyond OV5_FORM1_AFFINITY and OV5_DRCONF_MEMORY, we
1967      * include the CAS-negotiated options in the migration stream, unless
1968      * they affect boot time behaviour only.
1969      */
1970     spapr_ovec_set(ov5_mask, OV5_FORM1_AFFINITY);
1971     spapr_ovec_set(ov5_mask, OV5_DRCONF_MEMORY);
1972     spapr_ovec_set(ov5_mask, OV5_DRMEM_V2);
1973 
1974     /* spapr_ovec_diff returns true if bits were removed. We avoid using
1975      * the mask itself since in the future it's possible "legacy" bits may be
1976      * removed via machine options, which could generate a false positive
1977      * that breaks migration.
1978      */
1979     spapr_ovec_intersect(ov5_legacy, spapr->ov5, ov5_mask);
1980     cas_needed = spapr_ovec_diff(ov5_removed, spapr->ov5, ov5_legacy);
1981 
1982     spapr_ovec_cleanup(ov5_mask);
1983     spapr_ovec_cleanup(ov5_legacy);
1984     spapr_ovec_cleanup(ov5_removed);
1985 
1986     return cas_needed;
1987 }
1988 
1989 static const VMStateDescription vmstate_spapr_ov5_cas = {
1990     .name = "spapr_option_vector_ov5_cas",
1991     .version_id = 1,
1992     .minimum_version_id = 1,
1993     .needed = spapr_ov5_cas_needed,
1994     .fields = (VMStateField[]) {
1995         VMSTATE_STRUCT_POINTER_V(ov5_cas, SpaprMachineState, 1,
1996                                  vmstate_spapr_ovec, SpaprOptionVector),
1997         VMSTATE_END_OF_LIST()
1998     },
1999 };
2000 
2001 static bool spapr_patb_entry_needed(void *opaque)
2002 {
2003     SpaprMachineState *spapr = opaque;
2004 
2005     return !!spapr->patb_entry;
2006 }
2007 
2008 static const VMStateDescription vmstate_spapr_patb_entry = {
2009     .name = "spapr_patb_entry",
2010     .version_id = 1,
2011     .minimum_version_id = 1,
2012     .needed = spapr_patb_entry_needed,
2013     .fields = (VMStateField[]) {
2014         VMSTATE_UINT64(patb_entry, SpaprMachineState),
2015         VMSTATE_END_OF_LIST()
2016     },
2017 };
2018 
2019 static bool spapr_irq_map_needed(void *opaque)
2020 {
2021     SpaprMachineState *spapr = opaque;
2022 
2023     return spapr->irq_map && !bitmap_empty(spapr->irq_map, spapr->irq_map_nr);
2024 }
2025 
2026 static const VMStateDescription vmstate_spapr_irq_map = {
2027     .name = "spapr_irq_map",
2028     .version_id = 1,
2029     .minimum_version_id = 1,
2030     .needed = spapr_irq_map_needed,
2031     .fields = (VMStateField[]) {
2032         VMSTATE_BITMAP(irq_map, SpaprMachineState, 0, irq_map_nr),
2033         VMSTATE_END_OF_LIST()
2034     },
2035 };
2036 
2037 static bool spapr_dtb_needed(void *opaque)
2038 {
2039     SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(opaque);
2040 
2041     return smc->update_dt_enabled;
2042 }
2043 
2044 static int spapr_dtb_pre_load(void *opaque)
2045 {
2046     SpaprMachineState *spapr = (SpaprMachineState *)opaque;
2047 
2048     g_free(spapr->fdt_blob);
2049     spapr->fdt_blob = NULL;
2050     spapr->fdt_size = 0;
2051 
2052     return 0;
2053 }
2054 
2055 static const VMStateDescription vmstate_spapr_dtb = {
2056     .name = "spapr_dtb",
2057     .version_id = 1,
2058     .minimum_version_id = 1,
2059     .needed = spapr_dtb_needed,
2060     .pre_load = spapr_dtb_pre_load,
2061     .fields = (VMStateField[]) {
2062         VMSTATE_UINT32(fdt_initial_size, SpaprMachineState),
2063         VMSTATE_UINT32(fdt_size, SpaprMachineState),
2064         VMSTATE_VBUFFER_ALLOC_UINT32(fdt_blob, SpaprMachineState, 0, NULL,
2065                                      fdt_size),
2066         VMSTATE_END_OF_LIST()
2067     },
2068 };
2069 
2070 static const VMStateDescription vmstate_spapr = {
2071     .name = "spapr",
2072     .version_id = 3,
2073     .minimum_version_id = 1,
2074     .pre_load = spapr_pre_load,
2075     .post_load = spapr_post_load,
2076     .pre_save = spapr_pre_save,
2077     .fields = (VMStateField[]) {
2078         /* used to be @next_irq */
2079         VMSTATE_UNUSED_BUFFER(version_before_3, 0, 4),
2080 
2081         /* RTC offset */
2082         VMSTATE_UINT64_TEST(rtc_offset, SpaprMachineState, version_before_3),
2083 
2084         VMSTATE_PPC_TIMEBASE_V(tb, SpaprMachineState, 2),
2085         VMSTATE_END_OF_LIST()
2086     },
2087     .subsections = (const VMStateDescription*[]) {
2088         &vmstate_spapr_ov5_cas,
2089         &vmstate_spapr_patb_entry,
2090         &vmstate_spapr_pending_events,
2091         &vmstate_spapr_cap_htm,
2092         &vmstate_spapr_cap_vsx,
2093         &vmstate_spapr_cap_dfp,
2094         &vmstate_spapr_cap_cfpc,
2095         &vmstate_spapr_cap_sbbc,
2096         &vmstate_spapr_cap_ibs,
2097         &vmstate_spapr_irq_map,
2098         &vmstate_spapr_cap_nested_kvm_hv,
2099         &vmstate_spapr_dtb,
2100         &vmstate_spapr_cap_large_decr,
2101         &vmstate_spapr_cap_ccf_assist,
2102         NULL
2103     }
2104 };
2105 
2106 static int htab_save_setup(QEMUFile *f, void *opaque)
2107 {
2108     SpaprMachineState *spapr = opaque;
2109 
2110     /* "Iteration" header */
2111     if (!spapr->htab_shift) {
2112         qemu_put_be32(f, -1);
2113     } else {
2114         qemu_put_be32(f, spapr->htab_shift);
2115     }
2116 
2117     if (spapr->htab) {
2118         spapr->htab_save_index = 0;
2119         spapr->htab_first_pass = true;
2120     } else {
2121         if (spapr->htab_shift) {
2122             assert(kvm_enabled());
2123         }
2124     }
2125 
2127     return 0;
2128 }
2129 
2130 static void htab_save_chunk(QEMUFile *f, SpaprMachineState *spapr,
2131                             int chunkstart, int n_valid, int n_invalid)
2132 {
2133     qemu_put_be32(f, chunkstart);
2134     qemu_put_be16(f, n_valid);
2135     qemu_put_be16(f, n_invalid);
2136     qemu_put_buffer(f, HPTE(spapr->htab, chunkstart),
2137                     HASH_PTE_SIZE_64 * n_valid);
2138 }
2139 
2140 static void htab_save_end_marker(QEMUFile *f)
2141 {
2142     qemu_put_be32(f, 0);
2143     qemu_put_be16(f, 0);
2144     qemu_put_be16(f, 0);
2145 }
2146 
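/*
 * Editor's note on the on-the-wire format produced by the two helpers
 * above: each chunk is
 *
 *     be32 index | be16 n_valid | be16 n_invalid | n_valid * 16-byte HPTEs
 *
 * and a stream section is terminated by the all-zero end marker. For
 * example, a chunk describing slots 5..7 as valid and 8..9 as invalid is
 * encoded as 00000005 0003 0002 followed by 48 bytes of HPTE data;
 * invalid entries are conveyed by count only, with no data bytes.
 */
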
2147 static void htab_save_first_pass(QEMUFile *f, SpaprMachineState *spapr,
2148                                  int64_t max_ns)
2149 {
2150     bool has_timeout = max_ns != -1;
2151     int htabslots = HTAB_SIZE(spapr) / HASH_PTE_SIZE_64;
2152     int index = spapr->htab_save_index;
2153     int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
2154 
2155     assert(spapr->htab_first_pass);
2156 
2157     do {
2158         int chunkstart;
2159 
2160         /* Consume invalid HPTEs */
2161         while ((index < htabslots)
2162                && !HPTE_VALID(HPTE(spapr->htab, index))) {
2163             CLEAN_HPTE(HPTE(spapr->htab, index));
2164             index++;
2165         }
2166 
2167         /* Consume valid HPTEs */
2168         chunkstart = index;
2169         while ((index < htabslots) && (index - chunkstart < USHRT_MAX)
2170                && HPTE_VALID(HPTE(spapr->htab, index))) {
2171             CLEAN_HPTE(HPTE(spapr->htab, index));
2172             index++;
2173         }
2174 
2175         if (index > chunkstart) {
2176             int n_valid = index - chunkstart;
2177 
2178             htab_save_chunk(f, spapr, chunkstart, n_valid, 0);
2179 
2180             if (has_timeout &&
2181                 (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) > max_ns) {
2182                 break;
2183             }
2184         }
2185     } while ((index < htabslots) && !qemu_file_rate_limit(f));
2186 
2187     if (index >= htabslots) {
2188         assert(index == htabslots);
2189         index = 0;
2190         spapr->htab_first_pass = false;
2191     }
2192     spapr->htab_save_index = index;
2193 }
2194 
2195 static int htab_save_later_pass(QEMUFile *f, SpaprMachineState *spapr,
2196                                 int64_t max_ns)
2197 {
2198     bool final = max_ns < 0;
2199     int htabslots = HTAB_SIZE(spapr) / HASH_PTE_SIZE_64;
2200     int examined = 0, sent = 0;
2201     int index = spapr->htab_save_index;
2202     int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
2203 
2204     assert(!spapr->htab_first_pass);
2205 
2206     do {
2207         int chunkstart, invalidstart;
2208 
2209         /* Consume non-dirty HPTEs */
2210         while ((index < htabslots)
2211                && !HPTE_DIRTY(HPTE(spapr->htab, index))) {
2212             index++;
2213             examined++;
2214         }
2215 
2216         chunkstart = index;
2217         /* Consume valid dirty HPTEs */
2218         while ((index < htabslots) && (index - chunkstart < USHRT_MAX)
2219                && HPTE_DIRTY(HPTE(spapr->htab, index))
2220                && HPTE_VALID(HPTE(spapr->htab, index))) {
2221             CLEAN_HPTE(HPTE(spapr->htab, index));
2222             index++;
2223             examined++;
2224         }
2225 
2226         invalidstart = index;
2227         /* Consume invalid dirty HPTEs */
2228         while ((index < htabslots) && (index - invalidstart < USHRT_MAX)
2229                && HPTE_DIRTY(HPTE(spapr->htab, index))
2230                && !HPTE_VALID(HPTE(spapr->htab, index))) {
2231             CLEAN_HPTE(HPTE(spapr->htab, index));
2232             index++;
2233             examined++;
2234         }
2235 
2236         if (index > chunkstart) {
2237             int n_valid = invalidstart - chunkstart;
2238             int n_invalid = index - invalidstart;
2239 
2240             htab_save_chunk(f, spapr, chunkstart, n_valid, n_invalid);
2241             sent += index - chunkstart;
2242 
2243             if (!final && (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) > max_ns) {
2244                 break;
2245             }
2246         }
2247 
2248         if (examined >= htabslots) {
2249             break;
2250         }
2251 
2252         if (index >= htabslots) {
2253             assert(index == htabslots);
2254             index = 0;
2255         }
2256     } while ((examined < htabslots) && (!qemu_file_rate_limit(f) || final));
2257 
2258     if (index >= htabslots) {
2259         assert(index == htabslots);
2260         index = 0;
2261     }
2262 
2263     spapr->htab_save_index = index;
2264 
2265     return (examined >= htabslots) && (sent == 0) ? 1 : 0;
2266 }
2267 
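/*
 * Editor's note on the save algorithm: htab_save_first_pass() streams
 * every valid HPTE once, clearing dirty bits as it goes; the later passes
 * above then resend only entries dirtied since. The return value is 1
 * exactly when a full sweep found nothing left to send
 * (examined >= htabslots && sent == 0), which htab_save_iterate()
 * propagates as its "migration converged" status.
 */
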
2268 #define MAX_ITERATION_NS    5000000 /* 5 ms */
2269 #define MAX_KVM_BUF_SIZE    2048
2270 
2271 static int htab_save_iterate(QEMUFile *f, void *opaque)
2272 {
2273     SpaprMachineState *spapr = opaque;
2274     int fd;
2275     int rc = 0;
2276 
2277     /* Iteration header */
2278     if (!spapr->htab_shift) {
2279         qemu_put_be32(f, -1);
2280         return 1;
2281     } else {
2282         qemu_put_be32(f, 0);
2283     }
2284 
2285     if (!spapr->htab) {
2286         assert(kvm_enabled());
2287 
2288         fd = get_htab_fd(spapr);
2289         if (fd < 0) {
2290             return fd;
2291         }
2292 
2293         rc = kvmppc_save_htab(f, fd, MAX_KVM_BUF_SIZE, MAX_ITERATION_NS);
2294         if (rc < 0) {
2295             return rc;
2296         }
2297     } else if (spapr->htab_first_pass) {
2298         htab_save_first_pass(f, spapr, MAX_ITERATION_NS);
2299     } else {
2300         rc = htab_save_later_pass(f, spapr, MAX_ITERATION_NS);
2301     }
2302 
2303     htab_save_end_marker(f);
2304 
2305     return rc;
2306 }
2307 
2308 static int htab_save_complete(QEMUFile *f, void *opaque)
2309 {
2310     SpaprMachineState *spapr = opaque;
2311     int fd;
2312 
2313     /* Iteration header */
2314     if (!spapr->htab_shift) {
2315         qemu_put_be32(f, -1);
2316         return 0;
2317     } else {
2318         qemu_put_be32(f, 0);
2319     }
2320 
2321     if (!spapr->htab) {
2322         int rc;
2323 
2324         assert(kvm_enabled());
2325 
2326         fd = get_htab_fd(spapr);
2327         if (fd < 0) {
2328             return fd;
2329         }
2330 
2331         rc = kvmppc_save_htab(f, fd, MAX_KVM_BUF_SIZE, -1);
2332         if (rc < 0) {
2333             return rc;
2334         }
2335     } else {
2336         if (spapr->htab_first_pass) {
2337             htab_save_first_pass(f, spapr, -1);
2338         }
2339         htab_save_later_pass(f, spapr, -1);
2340     }
2341 
2342     /* End marker */
2343     htab_save_end_marker(f);
2344 
2345     return 0;
2346 }
2347 
2348 static int htab_load(QEMUFile *f, void *opaque, int version_id)
2349 {
2350     SpaprMachineState *spapr = opaque;
2351     uint32_t section_hdr;
2352     int fd = -1;
2353     Error *local_err = NULL;
2354 
2355     if (version_id < 1 || version_id > 1) {
2356         error_report("htab_load() bad version");
2357         return -EINVAL;
2358     }
2359 
2360     section_hdr = qemu_get_be32(f);
2361 
2362     if (section_hdr == -1) {
2363         spapr_free_hpt(spapr);
2364         return 0;
2365     }
2366 
2367     if (section_hdr) {
2368         /* First section gives the htab size */
2369         spapr_reallocate_hpt(spapr, section_hdr, &local_err);
2370         if (local_err) {
2371             error_report_err(local_err);
2372             return -EINVAL;
2373         }
2374         return 0;
2375     }
2376 
2377     if (!spapr->htab) {
2378         assert(kvm_enabled());
2379 
2380         fd = kvmppc_get_htab_fd(true, 0, &local_err);
2381         if (fd < 0) {
2382             error_report_err(local_err);
2383             return fd;
2384         }
2385     }
2386 
2387     while (true) {
2388         uint32_t index;
2389         uint16_t n_valid, n_invalid;
2390 
2391         index = qemu_get_be32(f);
2392         n_valid = qemu_get_be16(f);
2393         n_invalid = qemu_get_be16(f);
2394 
2395         if ((index == 0) && (n_valid == 0) && (n_invalid == 0)) {
2396             /* End of Stream */
2397             break;
2398         }
2399 
2400         if ((index + n_valid + n_invalid) >
2401             (HTAB_SIZE(spapr) / HASH_PTE_SIZE_64)) {
2402             /* Bad index in stream */
2403             error_report(
2404                 "htab_load() bad index %d (%hd+%hd entries) in htab stream (htab_shift=%d)",
2405                 index, n_valid, n_invalid, spapr->htab_shift);
2406             return -EINVAL;
2407         }
2408 
2409         if (spapr->htab) {
2410             if (n_valid) {
2411                 qemu_get_buffer(f, HPTE(spapr->htab, index),
2412                                 HASH_PTE_SIZE_64 * n_valid);
2413             }
2414             if (n_invalid) {
2415                 memset(HPTE(spapr->htab, index + n_valid), 0,
2416                        HASH_PTE_SIZE_64 * n_invalid);
2417             }
2418         } else {
2419             int rc;
2420 
2421             assert(fd >= 0);
2422 
2423             rc = kvmppc_load_htab_chunk(f, fd, index, n_valid, n_invalid);
2424             if (rc < 0) {
2425                 return rc;
2426             }
2427         }
2428     }
2429 
2430     if (!spapr->htab) {
2431         assert(fd >= 0);
2432         close(fd);
2433     }
2434 
2435     return 0;
2436 }
2437 
2438 static void htab_save_cleanup(void *opaque)
2439 {
2440     SpaprMachineState *spapr = opaque;
2441 
2442     close_htab_fd(spapr);
2443 }
2444 
2445 static SaveVMHandlers savevm_htab_handlers = {
2446     .save_setup = htab_save_setup,
2447     .save_live_iterate = htab_save_iterate,
2448     .save_live_complete_precopy = htab_save_complete,
2449     .save_cleanup = htab_save_cleanup,
2450     .load_state = htab_load,
2451 };
2452 
2453 static void spapr_boot_set(void *opaque, const char *boot_device,
2454                            Error **errp)
2455 {
2456     MachineState *machine = MACHINE(opaque);
2457     machine->boot_order = g_strdup(boot_device);
2458 }
2459 
2460 static void spapr_create_lmb_dr_connectors(SpaprMachineState *spapr)
2461 {
2462     MachineState *machine = MACHINE(spapr);
2463     uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE;
2464     uint32_t nr_lmbs = (machine->maxram_size - machine->ram_size)/lmb_size;
2465     int i;
2466 
2467     for (i = 0; i < nr_lmbs; i++) {
2468         uint64_t addr;
2469 
2470         addr = i * lmb_size + machine->device_memory->base;
2471         spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_LMB,
2472                                addr / lmb_size);
2473     }
2474 }
2475 
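/*
 * Editor's worked example (values hypothetical): with ram_size = 2 GiB
 * and maxram_size = 6 GiB, the 4 GiB hotpluggable region above yields
 * nr_lmbs = 16 LMBs of SPAPR_MEMORY_BLOCK_SIZE (256 MiB) each. Each DRC
 * index is addr / lmb_size, so if device_memory->base were 4 GiB, the
 * LMB DRCs would be numbered 16..31.
 */
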
2476 /*
2477  * If RAM size, maxmem size and individual node mem sizes aren't aligned
2478  * to SPAPR_MEMORY_BLOCK_SIZE (256 MiB), then refuse to start the guest
2479  * since we can't support such unaligned sizes with DRCONF_MEMORY.
2480  */
2481 static void spapr_validate_node_memory(MachineState *machine, Error **errp)
2482 {
2483     int i;
2484 
2485     if (machine->ram_size % SPAPR_MEMORY_BLOCK_SIZE) {
2486         error_setg(errp, "Memory size 0x" RAM_ADDR_FMT
2487                    " is not aligned to %" PRIu64 " MiB",
2488                    machine->ram_size,
2489                    SPAPR_MEMORY_BLOCK_SIZE / MiB);
2490         return;
2491     }
2492 
2493     if (machine->maxram_size % SPAPR_MEMORY_BLOCK_SIZE) {
2494         error_setg(errp, "Maximum memory size 0x" RAM_ADDR_FMT
2495                    " is not aligned to %" PRIu64 " MiB",
2496                    machine->maxram_size,
2497                    SPAPR_MEMORY_BLOCK_SIZE / MiB);
2498         return;
2499     }
2500 
2501     for (i = 0; i < nb_numa_nodes; i++) {
2502         if (numa_info[i].node_mem % SPAPR_MEMORY_BLOCK_SIZE) {
2503             error_setg(errp,
2504                        "Node %d memory size 0x%" PRIx64
2505                        " is not aligned to %" PRIu64 " MiB",
2506                        i, numa_info[i].node_mem,
2507                        SPAPR_MEMORY_BLOCK_SIZE / MiB);
2508             return;
2509         }
2510     }
2511 }
2512 
2513 /* find cpu slot in machine->possible_cpus by core_id */
2514 static CPUArchId *spapr_find_cpu_slot(MachineState *ms, uint32_t id, int *idx)
2515 {
2516     int index = id / smp_threads;
2517 
2518     if (index >= ms->possible_cpus->len) {
2519         return NULL;
2520     }
2521     if (idx) {
2522         *idx = index;
2523     }
2524     return &ms->possible_cpus->cpus[index];
2525 }
2526 
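/*
 * Editor's example for spapr_find_cpu_slot() above: core ids are handed
 * out as multiples of smp_threads (see spapr_init_cpus() below), so with
 * smp_threads = 8 a core_id of 24 maps to possible_cpus->cpus[3];
 * out-of-range ids return NULL.
 */
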
2527 static void spapr_set_vsmt_mode(SpaprMachineState *spapr, Error **errp)
2528 {
2529     Error *local_err = NULL;
2530     bool vsmt_user = !!spapr->vsmt;
2531     int kvm_smt = kvmppc_smt_threads();
2532     int ret;
2533 
2534     if (!kvm_enabled() && (smp_threads > 1)) {
2535         error_setg(&local_err, "TCG cannot support more than 1 thread/core "
2536                      "on a pseries machine");
2537         goto out;
2538     }
2539     if (!is_power_of_2(smp_threads)) {
2540         error_setg(&local_err, "Cannot support %d threads/core on a pseries "
2541                      "machine because it must be a power of 2", smp_threads);
2542         goto out;
2543     }
2544 
2545     /* Determine the VSMT mode to use: */
2546     if (vsmt_user) {
2547         if (spapr->vsmt < smp_threads) {
2548             error_setg(&local_err, "Cannot support VSMT mode %d"
2549                          " because it must be >= threads/core (%d)",
2550                          spapr->vsmt, smp_threads);
2551             goto out;
2552         }
2553         /* In this case, spapr->vsmt has been set by the command line */
2554     } else {
2555         /*
2556          * Default VSMT value is tricky, because we need it to be as
2557          * consistent as possible (for migration), but this requires
2558          * changing it for at least some existing cases.  We pick 8 as
2559          * the value that we'd get with KVM on POWER8, the
2560          * overwhelmingly common case in production systems.
2561          */
2562         spapr->vsmt = MAX(8, smp_threads);
2563     }
2564 
2565     /* KVM: If necessary, set the SMT mode: */
2566     if (kvm_enabled() && (spapr->vsmt != kvm_smt)) {
2567         ret = kvmppc_set_smt_threads(spapr->vsmt);
2568         if (ret) {
2569             /* Looks like KVM isn't able to change VSMT mode */
2570             error_setg(&local_err,
2571                        "Failed to set KVM's VSMT mode to %d (errno %d)",
2572                        spapr->vsmt, ret);
2573             /* We can live with that if the default one is big enough
2574              * for the number of threads, and a submultiple of the one
2575              * we want.  In this case we'll waste some vcpu ids, but
2576              * behaviour will be correct */
2577             if ((kvm_smt >= smp_threads) && ((spapr->vsmt % kvm_smt) == 0)) {
2578                 warn_report_err(local_err);
2579                 local_err = NULL;
2580                 goto out;
2581             } else {
2582                 if (!vsmt_user) {
2583                     error_append_hint(&local_err,
2584                                       "On PPC, a VM with %d threads/core"
2585                                       " on a host with %d threads/core"
2586                                       " requires the use of VSMT mode %d.\n",
2587                                       smp_threads, kvm_smt, spapr->vsmt);
2588                 }
2589                 kvmppc_hint_smt_possible(&local_err);
2590                 goto out;
2591             }
2592         }
2593     }
2594     /* else TCG: nothing to do currently */
2595 out:
2596     error_propagate(errp, local_err);
2597 }
2598 
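/*
 * Editor's examples of the VSMT policy above: with smp_threads = 4 and no
 * vsmt= option, the default is MAX(8, 4) = 8. If KVM then refuses to
 * switch to VSMT 8 but is already at SMT4, the fallback applies
 * (4 >= 4 and 8 % 4 == 0): QEMU warns and continues, merely leaving gaps
 * in the vcpu id space. An explicit vsmt=2 with smp_threads = 4 would
 * instead be rejected outright, since VSMT must be >= threads/core.
 */
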
2599 static void spapr_init_cpus(SpaprMachineState *spapr)
2600 {
2601     MachineState *machine = MACHINE(spapr);
2602     MachineClass *mc = MACHINE_GET_CLASS(machine);
2603     SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);
2604     const char *type = spapr_get_cpu_core_type(machine->cpu_type);
2605     const CPUArchIdList *possible_cpus;
2606     int boot_cores_nr = smp_cpus / smp_threads;
2607     int i;
2608 
2609     possible_cpus = mc->possible_cpu_arch_ids(machine);
2610     if (mc->has_hotpluggable_cpus) {
2611         if (smp_cpus % smp_threads) {
2612             error_report("smp_cpus (%u) must be multiple of threads (%u)",
2613                          smp_cpus, smp_threads);
2614             exit(1);
2615         }
2616         if (max_cpus % smp_threads) {
2617             error_report("max_cpus (%u) must be multiple of threads (%u)",
2618                          max_cpus, smp_threads);
2619             exit(1);
2620         }
2621     } else {
2622         if (max_cpus != smp_cpus) {
2623             error_report("This machine version does not support CPU hotplug");
2624             exit(1);
2625         }
2626         boot_cores_nr = possible_cpus->len;
2627     }
2628 
2629     if (smc->pre_2_10_has_unused_icps) {
2630         int i;
2631 
2632         for (i = 0; i < spapr_max_server_number(spapr); i++) {
2633             /* Dummy entries get deregistered when real ICPState objects
2634              * are registered during CPU core hotplug.
2635              */
2636             pre_2_10_vmstate_register_dummy_icp(i);
2637         }
2638     }
2639 
2640     for (i = 0; i < possible_cpus->len; i++) {
2641         int core_id = i * smp_threads;
2642 
2643         if (mc->has_hotpluggable_cpus) {
2644             spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_CPU,
2645                                    spapr_vcpu_id(spapr, core_id));
2646         }
2647 
2648         if (i < boot_cores_nr) {
2649             Object *core  = object_new(type);
2650             int nr_threads = smp_threads;
2651 
2652             /* Handle the partially filled core for older machine types */
2653             if ((i + 1) * smp_threads >= smp_cpus) {
2654                 nr_threads = smp_cpus - i * smp_threads;
2655             }
2656 
2657             object_property_set_int(core, nr_threads, "nr-threads",
2658                                     &error_fatal);
2659             object_property_set_int(core, core_id, CPU_CORE_PROP_CORE_ID,
2660                                     &error_fatal);
2661             object_property_set_bool(core, true, "realized", &error_fatal);
2662 
2663             object_unref(core);
2664         }
2665     }
2666 }
2667 
2668 static PCIHostState *spapr_create_default_phb(void)
2669 {
2670     DeviceState *dev;
2671 
2672     dev = qdev_create(NULL, TYPE_SPAPR_PCI_HOST_BRIDGE);
2673     qdev_prop_set_uint32(dev, "index", 0);
2674     qdev_init_nofail(dev);
2675 
2676     return PCI_HOST_BRIDGE(dev);
2677 }
2678 
2679 /* pSeries LPAR / sPAPR hardware init */
2680 static void spapr_machine_init(MachineState *machine)
2681 {
2682     SpaprMachineState *spapr = SPAPR_MACHINE(machine);
2683     SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);
2684     const char *kernel_filename = machine->kernel_filename;
2685     const char *initrd_filename = machine->initrd_filename;
2686     PCIHostState *phb;
2687     int i;
2688     MemoryRegion *sysmem = get_system_memory();
2689     MemoryRegion *ram = g_new(MemoryRegion, 1);
2690     hwaddr node0_size = spapr_node0_size(machine);
2691     long load_limit, fw_size;
2692     char *filename;
2693     Error *resize_hpt_err = NULL;
2694 
2695     msi_nonbroken = true;
2696 
2697     QLIST_INIT(&spapr->phbs);
2698     QTAILQ_INIT(&spapr->pending_dimm_unplugs);
2699 
2700     /* Determine capabilities to run with */
2701     spapr_caps_init(spapr);
2702 
2703     kvmppc_check_papr_resize_hpt(&resize_hpt_err);
2704     if (spapr->resize_hpt == SPAPR_RESIZE_HPT_DEFAULT) {
2705         /*
2706          * If the user explicitly requested a mode we should either
2707          * supply it, or fail completely (which we do below).  But if
2708          * it's not set explicitly, we reset our mode to something
2709          * that works
2710          */
2711         if (resize_hpt_err) {
2712             spapr->resize_hpt = SPAPR_RESIZE_HPT_DISABLED;
2713             error_free(resize_hpt_err);
2714             resize_hpt_err = NULL;
2715         } else {
2716             spapr->resize_hpt = smc->resize_hpt_default;
2717         }
2718     }
2719 
2720     assert(spapr->resize_hpt != SPAPR_RESIZE_HPT_DEFAULT);
2721 
2722     if ((spapr->resize_hpt != SPAPR_RESIZE_HPT_DISABLED) && resize_hpt_err) {
2723         /*
2724          * User requested HPT resize, but this host can't supply it.  Bail out
2725          */
2726         error_report_err(resize_hpt_err);
2727         exit(1);
2728     }
2729 
2730     spapr->rma_size = node0_size;
2731 
2732     /* With KVM, we don't actually know whether KVM supports an
2733      * unbounded RMA (PR KVM) or is limited by the hash table size
2734      * (HV KVM using VRMA), so we always assume the latter
2735      *
2736      * In that case, we also limit the initial allocations for RTAS
2737      * etc... to 256M since we have no way to know what the VRMA size
2738      * is going to be as it depends on the size of the hash table
2739      * which isn't determined yet.
2740      */
2741     if (kvm_enabled()) {
2742         spapr->vrma_adjust = 1;
2743         spapr->rma_size = MIN(spapr->rma_size, 0x10000000);
2744     }
2745 
2746     /* Actually we don't support unbounded RMA anymore since we added
2747      * proper emulation of HV mode. The max we can get is 16G, which
2748      * also happens to be what we configure for PAPR mode, so make sure
2749      * we don't do anything bigger than that
2750      */
2751     spapr->rma_size = MIN(spapr->rma_size, 0x400000000ull);
2752 
2753     if (spapr->rma_size > node0_size) {
2754         error_report("Numa node 0 has to span the RMA (%#08"HWADDR_PRIx")",
2755                      spapr->rma_size);
2756         exit(1);
2757     }
2758 
2759     /* Set up a load limit for the ramdisk, leaving room for SLOF and FDT */
2760     load_limit = MIN(spapr->rma_size, RTAS_MAX_ADDR) - FW_OVERHEAD;
2761 
2762     /*
2763      * VSMT must be set in order to be able to compute VCPU ids, i.e. to
2764      * call spapr_max_server_number() or spapr_vcpu_id().
2765      */
2766     spapr_set_vsmt_mode(spapr, &error_fatal);
2767 
2768     /* Set up Interrupt Controller before we create the VCPUs */
2769     spapr_irq_init(spapr, &error_fatal);
2770 
2771     /* Set up containers for ibm,client-architecture-support negotiated options
2772      */
2773     spapr->ov5 = spapr_ovec_new();
2774     spapr->ov5_cas = spapr_ovec_new();
2775 
2776     if (smc->dr_lmb_enabled) {
2777         spapr_ovec_set(spapr->ov5, OV5_DRCONF_MEMORY);
2778         spapr_validate_node_memory(machine, &error_fatal);
2779     }
2780 
2781     spapr_ovec_set(spapr->ov5, OV5_FORM1_AFFINITY);
2782 
2783     /* advertise support for dedicated HP event source to guests */
2784     if (spapr->use_hotplug_event_source) {
2785         spapr_ovec_set(spapr->ov5, OV5_HP_EVT);
2786     }
2787 
2788     /* advertise support for HPT resizing */
2789     if (spapr->resize_hpt != SPAPR_RESIZE_HPT_DISABLED) {
2790         spapr_ovec_set(spapr->ov5, OV5_HPT_RESIZE);
2791     }
2792 
2793     /* advertise support for ibm,dynamic-memory-v2 */
2794     spapr_ovec_set(spapr->ov5, OV5_DRMEM_V2);
2795 
2796     /* advertise XIVE on POWER9 machines */
2797     if (spapr->irq->ov5 & (SPAPR_OV5_XIVE_EXPLOIT | SPAPR_OV5_XIVE_BOTH)) {
2798         if (ppc_type_check_compat(machine->cpu_type, CPU_POWERPC_LOGICAL_3_00,
2799                                   0, spapr->max_compat_pvr)) {
2800             spapr_ovec_set(spapr->ov5, OV5_XIVE_EXPLOIT);
2801         } else if (spapr->irq->ov5 & SPAPR_OV5_XIVE_EXPLOIT) {
2802             error_report("XIVE-only machines require a POWER9 CPU");
2803             exit(1);
2804         }
2805     }
2806 
2807     /* init CPUs */
2808     spapr_init_cpus(spapr);
2809 
2810     if ((!kvm_enabled() || kvmppc_has_cap_mmu_radix()) &&
2811         ppc_type_check_compat(machine->cpu_type, CPU_POWERPC_LOGICAL_3_00, 0,
2812                               spapr->max_compat_pvr)) {
2813         /* KVM and TCG always allow GTSE with radix... */
2814         spapr_ovec_set(spapr->ov5, OV5_MMU_RADIX_GTSE);
2815     }
2816     /* ... but not with hash (currently). */
2817 
2818     if (kvm_enabled()) {
2819         /* Enable H_LOGICAL_CI_* so SLOF can talk to in-kernel devices */
2820         kvmppc_enable_logical_ci_hcalls();
2821         kvmppc_enable_set_mode_hcall();
2822 
2823         /* H_CLEAR_MOD/_REF are mandatory in PAPR, but off by default */
2824         kvmppc_enable_clear_ref_mod_hcalls();
2825 
2826         /* Enable H_PAGE_INIT */
2827         kvmppc_enable_h_page_init();
2828     }
2829 
2830     /* allocate RAM */
2831     memory_region_allocate_system_memory(ram, NULL, "ppc_spapr.ram",
2832                                          machine->ram_size);
2833     memory_region_add_subregion(sysmem, 0, ram);
2834 
2835     /* always allocate the device memory information */
2836     machine->device_memory = g_malloc0(sizeof(*machine->device_memory));
2837 
2838     /* initialize hotplug memory address space */
2839     if (machine->ram_size < machine->maxram_size) {
2840         ram_addr_t device_mem_size = machine->maxram_size - machine->ram_size;
2841         /*
2842          * Limit the number of hotpluggable memory slots to half the number
2843          * slots that KVM supports, leaving the other half for PCI and other
2844          * devices. However ensure that number of slots doesn't drop below 32.
2845          */
2846         int max_memslots = kvm_enabled() ? kvm_get_max_memslots() / 2 :
2847                            SPAPR_MAX_RAM_SLOTS;
2848 
2849         if (max_memslots < SPAPR_MAX_RAM_SLOTS) {
2850             max_memslots = SPAPR_MAX_RAM_SLOTS;
2851         }
2852         if (machine->ram_slots > max_memslots) {
2853             error_report("Specified number of memory slots %"
2854                          PRIu64" exceeds max supported %d",
2855                          machine->ram_slots, max_memslots);
2856             exit(1);
2857         }
2858 
2859         machine->device_memory->base = ROUND_UP(machine->ram_size,
2860                                                 SPAPR_DEVICE_MEM_ALIGN);
2861         memory_region_init(&machine->device_memory->mr, OBJECT(spapr),
2862                            "device-memory", device_mem_size);
2863         memory_region_add_subregion(sysmem, machine->device_memory->base,
2864                                     &machine->device_memory->mr);
2865     }
2866 
2867     if (smc->dr_lmb_enabled) {
2868         spapr_create_lmb_dr_connectors(spapr);
2869     }
2870 
2871     filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, "spapr-rtas.bin");
2872     if (!filename) {
2873         error_report("Could not find LPAR rtas '%s'", "spapr-rtas.bin");
2874         exit(1);
2875     }
2876     spapr->rtas_size = get_image_size(filename);
2877     if (spapr->rtas_size < 0) {
2878         error_report("Could not get size of LPAR rtas '%s'", filename);
2879         exit(1);
2880     }
2881     spapr->rtas_blob = g_malloc(spapr->rtas_size);
2882     if (load_image_size(filename, spapr->rtas_blob, spapr->rtas_size) < 0) {
2883         error_report("Could not load LPAR rtas '%s'", filename);
2884         exit(1);
2885     }
2886     if (spapr->rtas_size > RTAS_MAX_SIZE) {
2887         error_report("RTAS too big ! 0x%zx bytes (max is 0x%x)",
2888                      (size_t)spapr->rtas_size, RTAS_MAX_SIZE);
2889         exit(1);
2890     }
2891     g_free(filename);
2892 
2893     /* Set up RTAS event infrastructure */
2894     spapr_events_init(spapr);
2895 
2896     /* Set up the RTC RTAS interfaces */
2897     spapr_rtc_create(spapr);
2898 
2899     /* Set up VIO bus */
2900     spapr->vio_bus = spapr_vio_bus_init();
2901 
2902     for (i = 0; i < serial_max_hds(); i++) {
2903         if (serial_hd(i)) {
2904             spapr_vty_create(spapr->vio_bus, serial_hd(i));
2905         }
2906     }
2907 
2908     /* We always have at least the nvram device on VIO */
2909     spapr_create_nvram(spapr);
2910 
2911     /*
2912      * Set up hotplug / dynamic-reconfiguration connectors. Top-level
2913      * connectors (described in the root DT node's "ibm,drc-types" property)
2914      * are pre-initialized here. Additional child connectors (such as
2915      * connectors for a PHB's PCI slots) are added as needed during their
2916      * parent's realization.
2917      */
2918     if (smc->dr_phb_enabled) {
2919         for (i = 0; i < SPAPR_MAX_PHBS; i++) {
2920             spapr_dr_connector_new(OBJECT(machine), TYPE_SPAPR_DRC_PHB, i);
2921         }
2922     }
2923 
2924     /* Set up PCI */
2925     spapr_pci_rtas_init();
2926 
2927     phb = spapr_create_default_phb();
2928 
2929     for (i = 0; i < nb_nics; i++) {
2930         NICInfo *nd = &nd_table[i];
2931 
2932         if (!nd->model) {
2933             nd->model = g_strdup("spapr-vlan");
2934         }
2935 
2936         if (g_str_equal(nd->model, "spapr-vlan") ||
2937             g_str_equal(nd->model, "ibmveth")) {
2938             spapr_vlan_create(spapr->vio_bus, nd);
2939         } else {
2940             pci_nic_init_nofail(&nd_table[i], phb->bus, nd->model, NULL);
2941         }
2942     }
2943 
2944     for (i = 0; i <= drive_get_max_bus(IF_SCSI); i++) {
2945         spapr_vscsi_create(spapr->vio_bus);
2946     }
2947 
2948     /* Graphics */
2949     if (spapr_vga_init(phb->bus, &error_fatal)) {
2950         spapr->has_graphics = true;
2951         machine->usb |= defaults_enabled() && !machine->usb_disabled;
2952     }
2953 
2954     if (machine->usb) {
2955         if (smc->use_ohci_by_default) {
2956             pci_create_simple(phb->bus, -1, "pci-ohci");
2957         } else {
2958             pci_create_simple(phb->bus, -1, "nec-usb-xhci");
2959         }
2960 
2961         if (spapr->has_graphics) {
2962             USBBus *usb_bus = usb_bus_find(-1);
2963 
2964             usb_create_simple(usb_bus, "usb-kbd");
2965             usb_create_simple(usb_bus, "usb-mouse");
2966         }
2967     }
2968 
2969     if (spapr->rma_size < (MIN_RMA_SLOF * MiB)) {
2970         error_report(
2971             "pSeries SLOF firmware requires >= %ldM guest RMA (Real Mode Area memory)",
2972             MIN_RMA_SLOF);
2973         exit(1);
2974     }
2975 
2976     if (kernel_filename) {
2977         uint64_t lowaddr = 0;
2978 
2979         spapr->kernel_size = load_elf(kernel_filename, NULL,
2980                                       translate_kernel_address, NULL,
2981                                       NULL, &lowaddr, NULL, 1,
2982                                       PPC_ELF_MACHINE, 0, 0);
2983         if (spapr->kernel_size == ELF_LOAD_WRONG_ENDIAN) {
2984             spapr->kernel_size = load_elf(kernel_filename, NULL,
2985                                           translate_kernel_address, NULL, NULL,
2986                                           &lowaddr, NULL, 0, PPC_ELF_MACHINE,
2987                                           0, 0);
2988             spapr->kernel_le = spapr->kernel_size > 0;
2989         }
2990         if (spapr->kernel_size < 0) {
2991             error_report("error loading %s: %s", kernel_filename,
2992                          load_elf_strerror(spapr->kernel_size));
2993             exit(1);
2994         }
2995 
2996         /* load initrd */
2997         if (initrd_filename) {
2998             /* Try to locate the initrd in the gap between the kernel
2999              * and the firmware. Add a bit of space just in case
3000              */
3001             spapr->initrd_base = (KERNEL_LOAD_ADDR + spapr->kernel_size
3002                                   + 0x1ffff) & ~0xffff;
3003             spapr->initrd_size = load_image_targphys(initrd_filename,
3004                                                      spapr->initrd_base,
3005                                                      load_limit
3006                                                      - spapr->initrd_base);
3007             if (spapr->initrd_size < 0) {
3008                 error_report("could not load initial ram disk '%s'",
3009                              initrd_filename);
3010                 exit(1);
3011             }
3012         }
3013     }
3014 
3015     if (bios_name == NULL) {
3016         bios_name = FW_FILE_NAME;
3017     }
3018     filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
3019     if (!filename) {
3020         error_report("Could not find LPAR firmware '%s'", bios_name);
3021         exit(1);
3022     }
3023     fw_size = load_image_targphys(filename, 0, FW_MAX_SIZE);
3024     if (fw_size <= 0) {
3025         error_report("Could not load LPAR firmware '%s'", filename);
3026         exit(1);
3027     }
3028     g_free(filename);
3029 
3030     /* FIXME: Should register things through the MachineState's qdev
3031      * interface; this is a legacy from the sPAPREnvironment structure
3032      * which predated MachineState but had a similar function */
3033     vmstate_register(NULL, 0, &vmstate_spapr, spapr);
3034     register_savevm_live(NULL, "spapr/htab", -1, 1,
3035                          &savevm_htab_handlers, spapr);
3036 
3037     qbus_set_hotplug_handler(sysbus_get_default(), OBJECT(machine),
3038                              &error_fatal);
3039 
3040     qemu_register_boot_set(spapr_boot_set, spapr);
3041 
3042     if (kvm_enabled()) {
3043         /* to stop and start vmclock */
3044         qemu_add_vm_change_state_handler(cpu_ppc_clock_vm_state_change,
3045                                          &spapr->tb);
3046 
3047         kvmppc_spapr_enable_inkernel_multitce();
3048     }
3049 }
3050 
3051 static int spapr_kvm_type(MachineState *machine, const char *vm_type)
3052 {
3053     if (!vm_type) {
3054         return 0;
3055     }
3056 
3057     if (!strcmp(vm_type, "HV")) {
3058         return 1;
3059     }
3060 
3061     if (!strcmp(vm_type, "PR")) {
3062         return 2;
3063     }
3064 
3065     error_report("Unknown kvm-type specified '%s'", vm_type);
3066     exit(1);
3067 }
3068 
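/*
 * Editor's usage note: the mapping above backs the pseries machine's
 * kvm-type option, e.g. "-machine pseries,accel=kvm,kvm-type=HV". The
 * returned values are understood to match the kernel's KVM_VM_PPC_HV (1)
 * and KVM_VM_PPC_PR (2) VM types; 0 lets the kernel pick its default.
 */
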
3069 /*
3070  * Implementation of an interface to adjust firmware path
3071  * for the bootindex property handling.
3072  */
3073 static char *spapr_get_fw_dev_path(FWPathProvider *p, BusState *bus,
3074                                    DeviceState *dev)
3075 {
3076 #define CAST(type, obj, name) \
3077     ((type *)object_dynamic_cast(OBJECT(obj), (name)))
3078     SCSIDevice *d = CAST(SCSIDevice,  dev, TYPE_SCSI_DEVICE);
3079     SpaprPhbState *phb = CAST(SpaprPhbState, dev, TYPE_SPAPR_PCI_HOST_BRIDGE);
3080     VHostSCSICommon *vsc = CAST(VHostSCSICommon, dev, TYPE_VHOST_SCSI_COMMON);
3081 
3082     if (d) {
3083         void *spapr = CAST(void, bus->parent, "spapr-vscsi");
3084         VirtIOSCSI *virtio = CAST(VirtIOSCSI, bus->parent, TYPE_VIRTIO_SCSI);
3085         USBDevice *usb = CAST(USBDevice, bus->parent, TYPE_USB_DEVICE);
3086 
3087         if (spapr) {
3088             /*
3089              * Replace "channel@0/disk@0,0" with "disk@8000000000000000":
3090              * In the top 16 bits of the 64-bit LUN, we use SRP luns of the form
3091              * 0x8000 | (target << 8) | (bus << 5) | lun
3092              * (see the "Logical unit addressing format" table in SAM5)
3093              */
3094             unsigned id = 0x8000 | (d->id << 8) | (d->channel << 5) | d->lun;
3095             return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
3096                                    (uint64_t)id << 48);
3097         } else if (virtio) {
3098             /*
3099              * We use SRP luns of the form 01000000 | (target << 8) | lun
3100              * in the top 32 bits of the 64-bit LUN
3101              * Note: the quote above is from SLOF and it is wrong,
3102              * the actual binding is:
3103              * swap 0100 or 10 << or 20 << ( target lun-id -- srplun )
3104              */
3105             unsigned id = 0x1000000 | (d->id << 16) | d->lun;
3106             if (d->lun >= 256) {
3107                 /* Use the LUN "flat space addressing method" */
3108                 id |= 0x4000;
3109             }
3110             return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
3111                                    (uint64_t)id << 32);
3112         } else if (usb) {
3113             /*
3114              * We use SRP luns of the form 01000000 | (usb-port << 16) | lun
3115              * in the top 32 bits of the 64-bit LUN
3116              */
3117             unsigned usb_port = atoi(usb->port->path);
3118             unsigned id = 0x1000000 | (usb_port << 16) | d->lun;
3119             return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
3120                                    (uint64_t)id << 32);
3121         }
3122     }
3123 
3124     /*
3125      * SLOF probes the USB devices, and if it recognizes that the device is a
3126      * storage device, it changes its name to "storage" instead of "usb-host",
3127      * and additionally adds a child node for the SCSI LUN, so the correct
3128      * boot path in SLOF is something like ".../storage@1/disk@xxx" instead.
3129      */
3130     if (strcmp("usb-host", qdev_fw_name(dev)) == 0) {
3131         USBDevice *usbdev = CAST(USBDevice, dev, TYPE_USB_DEVICE);
3132         if (usb_host_dev_is_scsi_storage(usbdev)) {
3133             return g_strdup_printf("storage@%s/disk", usbdev->port->path);
3134         }
3135     }
3136 
3137     if (phb) {
3138         /* Replace "pci" with "pci@800000020000000" */
3139         return g_strdup_printf("pci@%"PRIX64, phb->buid);
3140     }
3141 
3142     if (vsc) {
3143         /* Same logic as virtio above */
3144         unsigned id = 0x1000000 | (vsc->target << 16) | vsc->lun;
3145         return g_strdup_printf("disk@%"PRIX64, (uint64_t)id << 32);
3146     }
3147 
3148     if (g_str_equal("pci-bridge", qdev_fw_name(dev))) {
3149         /* SLOF uses "pci" instead of "pci-bridge" for PCI bridges */
3150         PCIDevice *pcidev = CAST(PCIDevice, dev, TYPE_PCI_DEVICE);
3151         return g_strdup_printf("pci@%x", PCI_SLOT(pcidev->devfn));
3152     }
3153 
3154     return NULL;
3155 }
3156 
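/*
 * QOM getter/setter pairs backing the machine properties registered in
 * spapr_instance_init() below.
 */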
3157 static char *spapr_get_kvm_type(Object *obj, Error **errp)
3158 {
3159     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3160 
3161     return g_strdup(spapr->kvm_type);
3162 }
3163 
3164 static void spapr_set_kvm_type(Object *obj, const char *value, Error **errp)
3165 {
3166     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3167 
3168     g_free(spapr->kvm_type);
3169     spapr->kvm_type = g_strdup(value);
3170 }
3171 
3172 static bool spapr_get_modern_hotplug_events(Object *obj, Error **errp)
3173 {
3174     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3175 
3176     return spapr->use_hotplug_event_source;
3177 }
3178 
3179 static void spapr_set_modern_hotplug_events(Object *obj, bool value,
3180                                             Error **errp)
3181 {
3182     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3183 
3184     spapr->use_hotplug_event_source = value;
3185 }
3186 
3187 static bool spapr_get_msix_emulation(Object *obj, Error **errp)
3188 {
3189     return true;
3190 }
3191 
3192 static char *spapr_get_resize_hpt(Object *obj, Error **errp)
3193 {
3194     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3195 
3196     switch (spapr->resize_hpt) {
3197     case SPAPR_RESIZE_HPT_DEFAULT:
3198         return g_strdup("default");
3199     case SPAPR_RESIZE_HPT_DISABLED:
3200         return g_strdup("disabled");
3201     case SPAPR_RESIZE_HPT_ENABLED:
3202         return g_strdup("enabled");
3203     case SPAPR_RESIZE_HPT_REQUIRED:
3204         return g_strdup("required");
3205     }
3206     g_assert_not_reached();
3207 }
3208 
3209 static void spapr_set_resize_hpt(Object *obj, const char *value, Error **errp)
3210 {
3211     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3212 
3213     if (strcmp(value, "default") == 0) {
3214         spapr->resize_hpt = SPAPR_RESIZE_HPT_DEFAULT;
3215     } else if (strcmp(value, "disabled") == 0) {
3216         spapr->resize_hpt = SPAPR_RESIZE_HPT_DISABLED;
3217     } else if (strcmp(value, "enabled") == 0) {
3218         spapr->resize_hpt = SPAPR_RESIZE_HPT_ENABLED;
3219     } else if (strcmp(value, "required") == 0) {
3220         spapr->resize_hpt = SPAPR_RESIZE_HPT_REQUIRED;
3221     } else {
3222         error_setg(errp, "Bad value for \"resize-hpt\" property");
3223     }
3224 }
3225 
3226 static void spapr_get_vsmt(Object *obj, Visitor *v, const char *name,
3227                                    void *opaque, Error **errp)
3228 {
3229     visit_type_uint32(v, name, (uint32_t *)opaque, errp);
3230 }
3231 
3232 static void spapr_set_vsmt(Object *obj, Visitor *v, const char *name,
3233                                    void *opaque, Error **errp)
3234 {
3235     visit_type_uint32(v, name, (uint32_t *)opaque, errp);
3236 }
3237 
3238 static char *spapr_get_ic_mode(Object *obj, Error **errp)
3239 {
3240     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3241 
3242     if (spapr->irq == &spapr_irq_xics_legacy) {
3243         return g_strdup("legacy");
3244     } else if (spapr->irq == &spapr_irq_xics) {
3245         return g_strdup("xics");
3246     } else if (spapr->irq == &spapr_irq_xive) {
3247         return g_strdup("xive");
3248     } else if (spapr->irq == &spapr_irq_dual) {
3249         return g_strdup("dual");
3250     }
3251     g_assert_not_reached();
3252 }
3253 
3254 static void spapr_set_ic_mode(Object *obj, const char *value, Error **errp)
3255 {
3256     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3257 
3258     if (SPAPR_MACHINE_GET_CLASS(spapr)->legacy_irq_allocation) {
3259         error_setg(errp, "This machine only uses the legacy XICS backend, don't pass ic-mode");
3260         return;
3261     }
3262 
3263     /* The legacy IRQ backend cannot be set */
3264     if (strcmp(value, "xics") == 0) {
3265         spapr->irq = &spapr_irq_xics;
3266     } else if (strcmp(value, "xive") == 0) {
3267         spapr->irq = &spapr_irq_xive;
3268     } else if (strcmp(value, "dual") == 0) {
3269         spapr->irq = &spapr_irq_dual;
3270     } else {
3271         error_setg(errp, "Bad value for \"ic-mode\" property");
3272     }
3273 }
3274 
3275 static char *spapr_get_host_model(Object *obj, Error **errp)
3276 {
3277     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3278 
3279     return g_strdup(spapr->host_model);
3280 }
3281 
3282 static void spapr_set_host_model(Object *obj, const char *value, Error **errp)
3283 {
3284     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3285 
3286     g_free(spapr->host_model);
3287     spapr->host_model = g_strdup(value);
3288 }
3289 
3290 static char *spapr_get_host_serial(Object *obj, Error **errp)
3291 {
3292     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3293 
3294     return g_strdup(spapr->host_serial);
3295 }
3296 
3297 static void spapr_set_host_serial(Object *obj, const char *value, Error **errp)
3298 {
3299     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3300 
3301     g_free(spapr->host_serial);
3302     spapr->host_serial = g_strdup(value);
3303 }
3304 
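/*
 * Instance init: set per-machine defaults and register the QOM
 * properties (kvm-type, modern-hotplug-events, resize-hpt, vsmt,
 * ic-mode, host-model, host-serial, ...).
 */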
3305 static void spapr_instance_init(Object *obj)
3306 {
3307     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3308     SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
3309 
3310     spapr->htab_fd = -1;
3311     spapr->use_hotplug_event_source = true;
3312     object_property_add_str(obj, "kvm-type",
3313                             spapr_get_kvm_type, spapr_set_kvm_type, NULL);
3314     object_property_set_description(obj, "kvm-type",
3315                                     "Specifies the KVM virtualization mode (HV, PR)",
3316                                     NULL);
3317     object_property_add_bool(obj, "modern-hotplug-events",
3318                             spapr_get_modern_hotplug_events,
3319                             spapr_set_modern_hotplug_events,
3320                             NULL);
3321     object_property_set_description(obj, "modern-hotplug-events",
3322                                     "Use dedicated hotplug event mechanism in"
3323                                     " place of standard EPOW events when possible"
3324                                     " (required for memory hot-unplug support)",
3325                                     NULL);
3326     ppc_compat_add_property(obj, "max-cpu-compat", &spapr->max_compat_pvr,
3327                             "Maximum permitted CPU compatibility mode",
3328                             &error_fatal);
3329 
3330     object_property_add_str(obj, "resize-hpt",
3331                             spapr_get_resize_hpt, spapr_set_resize_hpt, NULL);
3332     object_property_set_description(obj, "resize-hpt",
3333                                     "Resizing of the Hash Page Table (enabled, disabled, required)",
3334                                     NULL);
3335     object_property_add(obj, "vsmt", "uint32", spapr_get_vsmt,
3336                         spapr_set_vsmt, NULL, &spapr->vsmt, &error_abort);
3337     object_property_set_description(obj, "vsmt",
3338                                     "Virtual SMT: KVM behaves as if this were"
3339                                     " the host's SMT mode", &error_abort);
3340     object_property_add_bool(obj, "vfio-no-msix-emulation",
3341                              spapr_get_msix_emulation, NULL, NULL);
3342 
3343     /* The machine class defines the default interrupt controller mode */
3344     spapr->irq = smc->irq;
3345     object_property_add_str(obj, "ic-mode", spapr_get_ic_mode,
3346                             spapr_set_ic_mode, NULL);
3347     object_property_set_description(obj, "ic-mode",
3348                  "Specifies the interrupt controller mode (xics, xive, dual)",
3349                  NULL);
3350 
3351     object_property_add_str(obj, "host-model",
3352         spapr_get_host_model, spapr_set_host_model,
3353         &error_abort);
3354     object_property_set_description(obj, "host-model",
3355         "Set host's model-id to use - none|passthrough|string", &error_abort);
3356     object_property_add_str(obj, "host-serial",
3357         spapr_get_host_serial, spapr_set_host_serial,
3358         &error_abort);
3359     object_property_set_description(obj, "host-serial",
3360         "Set host's system-id to use - none|passthrough|string", &error_abort);
3361 }
3362 
3363 static void spapr_machine_finalizefn(Object *obj)
3364 {
3365     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3366 
3367     g_free(spapr->kvm_type);
3368 }
3369 
3370 void spapr_do_system_reset_on_cpu(CPUState *cs, run_on_cpu_data arg)
3371 {
3372     cpu_synchronize_state(cs);
3373     ppc_cpu_do_system_reset(cs);
3374 }
3375 
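/*
 * NMI interface callback.  The reset is broadcast to every vCPU
 * (cpu_index is ignored) and runs asynchronously via
 * spapr_do_system_reset_on_cpu() above.
 */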
3376 static void spapr_nmi(NMIState *n, int cpu_index, Error **errp)
3377 {
3378     CPUState *cs;
3379 
3380     CPU_FOREACH(cs) {
3381         async_run_on_cpu(cs, spapr_do_system_reset_on_cpu, RUN_ON_CPU_NULL);
3382     }
3383 }
3384 
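/*
 * Build the FDT fragment for the LMB behind this DRC.  The guest
 * physical address is derived from the DRC index, and the NUMA node is
 * taken from the owning DIMM's node property.
 */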
3385 int spapr_lmb_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr,
3386                           void *fdt, int *fdt_start_offset, Error **errp)
3387 {
3388     uint64_t addr;
3389     uint32_t node;
3390 
3391     addr = spapr_drc_index(drc) * SPAPR_MEMORY_BLOCK_SIZE;
3392     node = object_property_get_uint(OBJECT(drc->dev), PC_DIMM_NODE_PROP,
3393                                     &error_abort);
3394     *fdt_start_offset = spapr_populate_memory_node(fdt, node, addr,
3395                                                    SPAPR_MEMORY_BLOCK_SIZE);
3396     return 0;
3397 }
3398 
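/*
 * Attach one DRC per SPAPR_MEMORY_BLOCK_SIZE (256MiB) LMB covering the
 * DIMM; if any attach fails, detach the DRCs attached so far.  For
 * hotplugged memory, notify the guest either through the dedicated
 * hotplug event source (indexed by the first LMB's DRC) or, failing
 * that, by a plain count-based event.
 */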
3399 static void spapr_add_lmbs(DeviceState *dev, uint64_t addr_start, uint64_t size,
3400                            bool dedicated_hp_event_source, Error **errp)
3401 {
3402     SpaprDrc *drc;
3403     uint32_t nr_lmbs = size / SPAPR_MEMORY_BLOCK_SIZE;
3404     int i;
3405     uint64_t addr = addr_start;
3406     bool hotplugged = spapr_drc_hotplugged(dev);
3407     Error *local_err = NULL;
3408 
3409     for (i = 0; i < nr_lmbs; i++) {
3410         drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
3411                               addr / SPAPR_MEMORY_BLOCK_SIZE);
3412         g_assert(drc);
3413 
3414         spapr_drc_attach(drc, dev, &local_err);
3415         if (local_err) {
3416             while (addr > addr_start) {
3417                 addr -= SPAPR_MEMORY_BLOCK_SIZE;
3418                 drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
3419                                       addr / SPAPR_MEMORY_BLOCK_SIZE);
3420                 spapr_drc_detach(drc);
3421             }
3422             error_propagate(errp, local_err);
3423             return;
3424         }
3425         if (!hotplugged) {
3426             spapr_drc_reset(drc);
3427         }
3428         addr += SPAPR_MEMORY_BLOCK_SIZE;
3429     }
3430     /* send hotplug notification to the
3431      * guest only in case of hotplugged memory
3432      */
3433     if (hotplugged) {
3434         if (dedicated_hp_event_source) {
3435             drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
3436                                   addr_start / SPAPR_MEMORY_BLOCK_SIZE);
3437             spapr_hotplug_req_add_by_count_indexed(SPAPR_DR_CONNECTOR_TYPE_LMB,
3438                                                    nr_lmbs,
3439                                                    spapr_drc_index(drc));
3440         } else {
3441             spapr_hotplug_req_add_by_count(SPAPR_DR_CONNECTOR_TYPE_LMB,
3442                                            nr_lmbs);
3443         }
3444     }
3445 }
3446 
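/*
 * Plug handler for PC-DIMMs: map the device's memory region, then
 * expose its LMBs to the guest; the DIMM is unplugged again if LMB
 * attachment fails.
 */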
3447 static void spapr_memory_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
3448                               Error **errp)
3449 {
3450     Error *local_err = NULL;
3451     SpaprMachineState *ms = SPAPR_MACHINE(hotplug_dev);
3452     PCDIMMDevice *dimm = PC_DIMM(dev);
3453     uint64_t size, addr;
3454 
3455     size = memory_device_get_region_size(MEMORY_DEVICE(dev), &error_abort);
3456 
3457     pc_dimm_plug(dimm, MACHINE(ms), &local_err);
3458     if (local_err) {
3459         goto out;
3460     }
3461 
3462     addr = object_property_get_uint(OBJECT(dimm),
3463                                     PC_DIMM_ADDR_PROP, &local_err);
3464     if (local_err) {
3465         goto out_unplug;
3466     }
3467 
3468     spapr_add_lmbs(dev, addr, size, spapr_ovec_test(ms->ov5_cas, OV5_HP_EVT),
3469                    &local_err);
3470     if (local_err) {
3471         goto out_unplug;
3472     }
3473 
3474     return;
3475 
3476 out_unplug:
3477     pc_dimm_unplug(dimm, MACHINE(ms));
3478 out:
3479     error_propagate(errp, local_err);
3480 }
3481 
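/*
 * Pre-plug checks for PC-DIMMs: DR-capable LMBs must be enabled, the
 * size must be a multiple of the 256MiB LMB size, and the backend page
 * size must pass spapr_check_pagesize().
 */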
3482 static void spapr_memory_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
3483                                   Error **errp)
3484 {
3485     const SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(hotplug_dev);
3486     SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_dev);
3487     PCDIMMDevice *dimm = PC_DIMM(dev);
3488     Error *local_err = NULL;
3489     uint64_t size;
3490     Object *memdev;
3491     hwaddr pagesize;
3492 
3493     if (!smc->dr_lmb_enabled) {
3494         error_setg(errp, "Memory hotplug not supported for this machine");
3495         return;
3496     }
3497 
3498     size = memory_device_get_region_size(MEMORY_DEVICE(dimm), &local_err);
3499     if (local_err) {
3500         error_propagate(errp, local_err);
3501         return;
3502     }
3503 
3504     if (size % SPAPR_MEMORY_BLOCK_SIZE) {
3505         error_setg(errp, "Hotplugged memory size must be a multiple of "
3506                       "%" PRIu64 " MB", SPAPR_MEMORY_BLOCK_SIZE / MiB);
3507         return;
3508     }
3509 
3510     memdev = object_property_get_link(OBJECT(dimm), PC_DIMM_MEMDEV_PROP,
3511                                       &error_abort);
3512     pagesize = host_memory_backend_pagesize(MEMORY_BACKEND(memdev));
3513     spapr_check_pagesize(spapr, pagesize, &local_err);
3514     if (local_err) {
3515         error_propagate(errp, local_err);
3516         return;
3517     }
3518 
3519     pc_dimm_pre_plug(dimm, MACHINE(hotplug_dev), NULL, errp);
3520 }
3521 
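/*
 * Book-keeping for an in-flight DIMM unplug: nr_lmbs counts the LMBs
 * the guest has not yet released (see spapr_lmb_release() below).
 */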
3522 struct SpaprDimmState {
3523     PCDIMMDevice *dimm;
3524     uint32_t nr_lmbs;
3525     QTAILQ_ENTRY(SpaprDimmState) next;
3526 };
3527 
3528 static SpaprDimmState *spapr_pending_dimm_unplugs_find(SpaprMachineState *s,
3529                                                        PCDIMMDevice *dimm)
3530 {
3531     SpaprDimmState *dimm_state = NULL;
3532 
3533     QTAILQ_FOREACH(dimm_state, &s->pending_dimm_unplugs, next) {
3534         if (dimm_state->dimm == dimm) {
3535             break;
3536         }
3537     }
3538     return dimm_state;
3539 }
3540 
3541 static SpaprDimmState *spapr_pending_dimm_unplugs_add(SpaprMachineState *spapr,
3542                                                       uint32_t nr_lmbs,
3543                                                       PCDIMMDevice *dimm)
3544 {
3545     SpaprDimmState *ds = NULL;
3546 
3547     /*
3548      * (due to the guest's refusal to remove the LMBs), this DIMM will
3549      * already be in the pending_dimm_unplugs list; in that case,
3550      * don't add it again.
3551      * case don't add again.
3552      */
3553     ds = spapr_pending_dimm_unplugs_find(spapr, dimm);
3554     if (!ds) {
3555         ds = g_malloc0(sizeof(SpaprDimmState));
3556         ds->nr_lmbs = nr_lmbs;
3557         ds->dimm = dimm;
3558         QTAILQ_INSERT_HEAD(&spapr->pending_dimm_unplugs, ds, next);
3559     }
3560     return ds;
3561 }
3562 
3563 static void spapr_pending_dimm_unplugs_remove(SpaprMachineState *spapr,
3564                                               SpaprDimmState *dimm_state)
3565 {
3566     QTAILQ_REMOVE(&spapr->pending_dimm_unplugs, dimm_state, next);
3567     g_free(dimm_state);
3568 }
3569 
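/*
 * Rebuild the pending-unplug state after it was lost (e.g. across a
 * migration) by counting the DIMM's LMB DRCs that still have a device
 * attached.
 */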
3570 static SpaprDimmState *spapr_recover_pending_dimm_state(SpaprMachineState *ms,
3571                                                         PCDIMMDevice *dimm)
3572 {
3573     SpaprDrc *drc;
3574     uint64_t size = memory_device_get_region_size(MEMORY_DEVICE(dimm),
3575                                                   &error_abort);
3576     uint32_t nr_lmbs = size / SPAPR_MEMORY_BLOCK_SIZE;
3577     uint32_t avail_lmbs = 0;
3578     uint64_t addr_start, addr;
3579     int i;
3580 
3581     addr_start = object_property_get_int(OBJECT(dimm), PC_DIMM_ADDR_PROP,
3582                                          &error_abort);
3583 
3584     addr = addr_start;
3585     for (i = 0; i < nr_lmbs; i++) {
3586         drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
3587                               addr / SPAPR_MEMORY_BLOCK_SIZE);
3588         g_assert(drc);
3589         if (drc->dev) {
3590             avail_lmbs++;
3591         }
3592         addr += SPAPR_MEMORY_BLOCK_SIZE;
3593     }
3594 
3595     return spapr_pending_dimm_unplugs_add(ms, avail_lmbs, dimm);
3596 }
3597 
3598 /* Callback to be called during DRC release. */
3599 void spapr_lmb_release(DeviceState *dev)
3600 {
3601     HotplugHandler *hotplug_ctrl = qdev_get_hotplug_handler(dev);
3602     SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_ctrl);
3603     SpaprDimmState *ds = spapr_pending_dimm_unplugs_find(spapr, PC_DIMM(dev));
3604 
3605     /* This information will get lost if a migration occurs
3606      * during the unplug process. In this case recover it. */
3607     if (ds == NULL) {
3608         ds = spapr_recover_pending_dimm_state(spapr, PC_DIMM(dev));
3609         g_assert(ds);
3610         /* At least the DRC being examined by the caller must be counted */
3611         g_assert(ds->nr_lmbs);
3612     }
3613 
3614     if (--ds->nr_lmbs) {
3615         return;
3616     }
3617 
3618     /*
3619      * Now that all the LMBs have been removed by the guest, call the
3620      * unplug handler chain. This can never fail.
3621      */
3622     hotplug_handler_unplug(hotplug_ctrl, dev, &error_abort);
3623     object_unparent(OBJECT(dev));
3624 }
3625 
3626 static void spapr_memory_unplug(HotplugHandler *hotplug_dev, DeviceState *dev)
3627 {
3628     SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_dev);
3629     SpaprDimmState *ds = spapr_pending_dimm_unplugs_find(spapr, PC_DIMM(dev));
3630 
3631     pc_dimm_unplug(PC_DIMM(dev), MACHINE(hotplug_dev));
3632     object_property_set_bool(OBJECT(dev), false, "realized", NULL);
3633     spapr_pending_dimm_unplugs_remove(spapr, ds);
3634 }
3635 
3636 static void spapr_memory_unplug_request(HotplugHandler *hotplug_dev,
3637                                         DeviceState *dev, Error **errp)
3638 {
3639     SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_dev);
3640     Error *local_err = NULL;
3641     PCDIMMDevice *dimm = PC_DIMM(dev);
3642     uint32_t nr_lmbs;
3643     uint64_t size, addr_start, addr;
3644     int i;
3645     SpaprDrc *drc;
3646 
3647     size = memory_device_get_region_size(MEMORY_DEVICE(dimm), &error_abort);
3648     nr_lmbs = size / SPAPR_MEMORY_BLOCK_SIZE;
3649 
3650     addr_start = object_property_get_uint(OBJECT(dimm), PC_DIMM_ADDR_PROP,
3651                                          &local_err);
3652     if (local_err) {
3653         goto out;
3654     }
3655 
3656     /*
3657      * An existing pending dimm state for this DIMM means that there is an
3658      * unplug operation in progress, waiting for the spapr_lmb_release
3659      * callback to complete the job (BQL can't cover that far). In this case,
3660      * bail out to avoid detaching DRCs that were already released.
3661      */
3662     if (spapr_pending_dimm_unplugs_find(spapr, dimm)) {
3663         error_setg(&local_err,
3664                    "Memory unplug already in progress for device %s",
3665                    dev->id);
3666         goto out;
3667     }
3668 
3669     spapr_pending_dimm_unplugs_add(spapr, nr_lmbs, dimm);
3670 
3671     addr = addr_start;
3672     for (i = 0; i < nr_lmbs; i++) {
3673         drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
3674                               addr / SPAPR_MEMORY_BLOCK_SIZE);
3675         g_assert(drc);
3676 
3677         spapr_drc_detach(drc);
3678         addr += SPAPR_MEMORY_BLOCK_SIZE;
3679     }
3680 
3681     drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
3682                           addr_start / SPAPR_MEMORY_BLOCK_SIZE);
3683     spapr_hotplug_req_remove_by_count_indexed(SPAPR_DR_CONNECTOR_TYPE_LMB,
3684                                               nr_lmbs, spapr_drc_index(drc));
3685 out:
3686     error_propagate(errp, local_err);
3687 }
3688 
3689 /* Callback to be called during DRC release. */
3690 void spapr_core_release(DeviceState *dev)
3691 {
3692     HotplugHandler *hotplug_ctrl = qdev_get_hotplug_handler(dev);
3693 
3694     /* Call the unplug handler chain. This can never fail. */
3695     hotplug_handler_unplug(hotplug_ctrl, dev, &error_abort);
3696     object_unparent(OBJECT(dev));
3697 }
3698 
3699 static void spapr_core_unplug(HotplugHandler *hotplug_dev, DeviceState *dev)
3700 {
3701     MachineState *ms = MACHINE(hotplug_dev);
3702     SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(ms);
3703     CPUCore *cc = CPU_CORE(dev);
3704     CPUArchId *core_slot = spapr_find_cpu_slot(ms, cc->core_id, NULL);
3705 
3706     if (smc->pre_2_10_has_unused_icps) {
3707         SpaprCpuCore *sc = SPAPR_CPU_CORE(OBJECT(dev));
3708         int i;
3709 
3710         for (i = 0; i < cc->nr_threads; i++) {
3711             CPUState *cs = CPU(sc->threads[i]);
3712 
3713             pre_2_10_vmstate_register_dummy_icp(cs->cpu_index);
3714         }
3715     }
3716 
3717     assert(core_slot);
3718     core_slot->cpu = NULL;
3719     object_property_set_bool(OBJECT(dev), false, "realized", NULL);
3720 }
3721 
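/*
 * Validate and start a CPU core unplug; the boot core (core slot 0)
 * can never be removed.
 */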
3722 static
3723 void spapr_core_unplug_request(HotplugHandler *hotplug_dev, DeviceState *dev,
3724                                Error **errp)
3725 {
3726     SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
3727     int index;
3728     SpaprDrc *drc;
3729     CPUCore *cc = CPU_CORE(dev);
3730 
3731     if (!spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index)) {
3732         error_setg(errp, "Unable to find CPU core with core-id: %d",
3733                    cc->core_id);
3734         return;
3735     }
3736     if (index == 0) {
3737         error_setg(errp, "Boot CPU core may not be unplugged");
3738         return;
3739     }
3740 
3741     drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU,
3742                           spapr_vcpu_id(spapr, cc->core_id));
3743     g_assert(drc);
3744 
3745     spapr_drc_detach(drc);
3746 
3747     spapr_hotplug_req_remove_by_index(drc);
3748 }
3749 
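/*
 * Build the FDT fragment for a hotplugged CPU core, named
 * "<fw_name>@<hex vcpu id>" after its first thread.
 */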
3750 int spapr_core_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr,
3751                            void *fdt, int *fdt_start_offset, Error **errp)
3752 {
3753     SpaprCpuCore *core = SPAPR_CPU_CORE(drc->dev);
3754     CPUState *cs = CPU(core->threads[0]);
3755     PowerPCCPU *cpu = POWERPC_CPU(cs);
3756     DeviceClass *dc = DEVICE_GET_CLASS(cs);
3757     int id = spapr_get_vcpu_id(cpu);
3758     char *nodename;
3759     int offset;
3760 
3761     nodename = g_strdup_printf("%s@%x", dc->fw_name, id);
3762     offset = fdt_add_subnode(fdt, 0, nodename);
3763     g_free(nodename);
3764 
3765     spapr_populate_cpu_dt(cs, fdt, offset, spapr);
3766 
3767     *fdt_start_offset = offset;
3768     return 0;
3769 }
3770 
3771 static void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
3772                             Error **errp)
3773 {
3774     SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
3775     MachineClass *mc = MACHINE_GET_CLASS(spapr);
3776     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
3777     SpaprCpuCore *core = SPAPR_CPU_CORE(OBJECT(dev));
3778     CPUCore *cc = CPU_CORE(dev);
3779     CPUState *cs;
3780     SpaprDrc *drc;
3781     Error *local_err = NULL;
3782     CPUArchId *core_slot;
3783     int index;
3784     bool hotplugged = spapr_drc_hotplugged(dev);
3785 
3786     core_slot = spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index);
3787     if (!core_slot) {
3788         error_setg(errp, "Unable to find CPU core with core-id: %d",
3789                    cc->core_id);
3790         return;
3791     }
3792     drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU,
3793                           spapr_vcpu_id(spapr, cc->core_id));
3794 
3795     g_assert(drc || !mc->has_hotpluggable_cpus);
3796 
3797     if (drc) {
3798         spapr_drc_attach(drc, dev, &local_err);
3799         if (local_err) {
3800             error_propagate(errp, local_err);
3801             return;
3802         }
3803 
3804         if (hotplugged) {
3805             /*
3806              * Send hotplug notification interrupt to the guest only
3807              * in case of hotplugged CPUs.
3808              */
3809             spapr_hotplug_req_add_by_index(drc);
3810         } else {
3811             spapr_drc_reset(drc);
3812         }
3813     }
3814 
3815     core_slot->cpu = OBJECT(dev);
3816 
3817     if (smc->pre_2_10_has_unused_icps) {
3818         int i;
3819 
3820         for (i = 0; i < cc->nr_threads; i++) {
3821             cs = CPU(core->threads[i]);
3822             pre_2_10_vmstate_unregister_dummy_icp(cs->cpu_index);
3823         }
3824     }
3825 }
3826 
3827 static void spapr_core_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
3828                                 Error **errp)
3829 {
3830     MachineState *machine = MACHINE(OBJECT(hotplug_dev));
3831     MachineClass *mc = MACHINE_GET_CLASS(hotplug_dev);
3832     Error *local_err = NULL;
3833     CPUCore *cc = CPU_CORE(dev);
3834     const char *base_core_type = spapr_get_cpu_core_type(machine->cpu_type);
3835     const char *type = object_get_typename(OBJECT(dev));
3836     CPUArchId *core_slot;
3837     int index;
3838 
3839     if (dev->hotplugged && !mc->has_hotpluggable_cpus) {
3840         error_setg(&local_err, "CPU hotplug not supported for this machine");
3841         goto out;
3842     }
3843 
3844     if (strcmp(base_core_type, type)) {
3845         error_setg(&local_err, "CPU core type should be %s", base_core_type);
3846         goto out;
3847     }
3848 
3849     if (cc->core_id % smp_threads) {
3850         error_setg(&local_err, "invalid core id %d", cc->core_id);
3851         goto out;
3852     }
3853 
3854     /*
3855      * In general we should have homogeneous threads-per-core, but old
3856      * (pre hotplug support) machine types allow the last core to have
3857      * reduced threads as a compatibility hack for the days when we
3858      * allowed a total vcpu count that was not a multiple of threads-per-core.
3859      */
3860     if (mc->has_hotpluggable_cpus && (cc->nr_threads != smp_threads)) {
3861         error_setg(&local_err, "invalid nr-threads %d, must be %d",
3862                    cc->nr_threads, smp_threads);
3863         goto out;
3864     }
3865 
3866     core_slot = spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index);
3867     if (!core_slot) {
3868         error_setg(&local_err, "core id %d out of range", cc->core_id);
3869         goto out;
3870     }
3871 
3872     if (core_slot->cpu) {
3873         error_setg(&local_err, "core %d already populated", cc->core_id);
3874         goto out;
3875     }
3876 
3877     numa_cpu_pre_plug(core_slot, dev, &local_err);
3878 
3879 out:
3880     error_propagate(errp, local_err);
3881 }
3882 
3883 int spapr_phb_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr,
3884                           void *fdt, int *fdt_start_offset, Error **errp)
3885 {
3886     SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(drc->dev);
3887     int intc_phandle;
3888 
3889     intc_phandle = spapr_irq_get_phandle(spapr, spapr->fdt_blob, errp);
3890     if (intc_phandle <= 0) {
3891         return -1;
3892     }
3893 
3894     if (spapr_populate_pci_dt(sphb, intc_phandle, fdt, spapr->irq->nr_msis,
3895                               fdt_start_offset)) {
3896         error_setg(errp, "unable to create FDT node for PHB %d", sphb->index);
3897         return -1;
3898     }
3899 
3900     /* generally SLOF creates these; for hotplug it's up to QEMU */
3901     _FDT(fdt_setprop_string(fdt, *fdt_start_offset, "name", "pci"));
3902 
3903     return 0;
3904 }
3905 
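/*
 * Pre-plug checks for PHBs: hotplug requires dr_phb_enabled, "index"
 * is mandatory, and phb_placement() both bounds-checks the index and
 * assigns the BUID, window addresses and LIOBNs.
 */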
3906 static void spapr_phb_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
3907                                Error **errp)
3908 {
3909     SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
3910     SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(dev);
3911     SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
3912     const unsigned windows_supported = spapr_phb_windows_supported(sphb);
3913 
3914     if (dev->hotplugged && !smc->dr_phb_enabled) {
3915         error_setg(errp, "PHB hotplug not supported for this machine");
3916         return;
3917     }
3918 
3919     if (sphb->index == (uint32_t)-1) {
3920         error_setg(errp, "\"index\" for PAPR PHB is mandatory");
3921         return;
3922     }
3923 
3924     /*
3925      * This will check that sphb->index doesn't exceed the maximum number of
3926      * PHBs for the current machine type.
3927      */
3928     smc->phb_placement(spapr, sphb->index,
3929                        &sphb->buid, &sphb->io_win_addr,
3930                        &sphb->mem_win_addr, &sphb->mem64_win_addr,
3931                        windows_supported, sphb->dma_liobn, errp);
3932 }
3933 
3934 static void spapr_phb_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
3935                            Error **errp)
3936 {
3937     SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
3938     SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
3939     SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(dev);
3940     SpaprDrc *drc;
3941     bool hotplugged = spapr_drc_hotplugged(dev);
3942     Error *local_err = NULL;
3943 
3944     if (!smc->dr_phb_enabled) {
3945         return;
3946     }
3947 
3948     drc = spapr_drc_by_id(TYPE_SPAPR_DRC_PHB, sphb->index);
3949     /* hotplug hooks should check it's enabled before getting this far */
3950     assert(drc);
3951 
3952     spapr_drc_attach(drc, DEVICE(dev), &local_err);
3953     if (local_err) {
3954         error_propagate(errp, local_err);
3955         return;
3956     }
3957 
3958     if (hotplugged) {
3959         spapr_hotplug_req_add_by_index(drc);
3960     } else {
3961         spapr_drc_reset(drc);
3962     }
3963 }
3964 
3965 void spapr_phb_release(DeviceState *dev)
3966 {
3967     HotplugHandler *hotplug_ctrl = qdev_get_hotplug_handler(dev);
3968 
3969     hotplug_handler_unplug(hotplug_ctrl, dev, &error_abort);
3970     object_unparent(OBJECT(dev));
3971 }
3972 
3973 static void spapr_phb_unplug(HotplugHandler *hotplug_dev, DeviceState *dev)
3974 {
3975     object_property_set_bool(OBJECT(dev), false, "realized", NULL);
3976 }
3977 
3978 static void spapr_phb_unplug_request(HotplugHandler *hotplug_dev,
3979                                      DeviceState *dev, Error **errp)
3980 {
3981     SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(dev);
3982     SpaprDrc *drc;
3983 
3984     drc = spapr_drc_by_id(TYPE_SPAPR_DRC_PHB, sphb->index);
3985     assert(drc);
3986 
3987     if (!spapr_drc_unplug_requested(drc)) {
3988         spapr_drc_detach(drc);
3989         spapr_hotplug_req_remove_by_index(drc);
3990     }
3991 }
3992 
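/*
 * Top-level hotplug dispatchers: route (pre-)plug and unplug requests
 * to the DIMM, CPU-core or PHB handlers above.
 */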
3993 static void spapr_machine_device_plug(HotplugHandler *hotplug_dev,
3994                                       DeviceState *dev, Error **errp)
3995 {
3996     if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
3997         spapr_memory_plug(hotplug_dev, dev, errp);
3998     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
3999         spapr_core_plug(hotplug_dev, dev, errp);
4000     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) {
4001         spapr_phb_plug(hotplug_dev, dev, errp);
4002     }
4003 }
4004 
4005 static void spapr_machine_device_unplug(HotplugHandler *hotplug_dev,
4006                                         DeviceState *dev, Error **errp)
4007 {
4008     if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
4009         spapr_memory_unplug(hotplug_dev, dev);
4010     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
4011         spapr_core_unplug(hotplug_dev, dev);
4012     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) {
4013         spapr_phb_unplug(hotplug_dev, dev);
4014     }
4015 }
4016 
4017 static void spapr_machine_device_unplug_request(HotplugHandler *hotplug_dev,
4018                                                 DeviceState *dev, Error **errp)
4019 {
4020     SpaprMachineState *sms = SPAPR_MACHINE(OBJECT(hotplug_dev));
4021     MachineClass *mc = MACHINE_GET_CLASS(sms);
4022     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
4023 
4024     if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
4025         if (spapr_ovec_test(sms->ov5_cas, OV5_HP_EVT)) {
4026             spapr_memory_unplug_request(hotplug_dev, dev, errp);
4027         } else {
4028             /* NOTE: this means there is a window after guest reset, prior to
4029              * CAS negotiation, where unplug requests will fail due to the
4030              * capability not being detected yet. This is a bit different from
4031              * the case with PCI unplug, where the events will be queued and
4032              * eventually handled by the guest after boot.
4033              */
4034             error_setg(errp, "Memory hot unplug not supported for this guest");
4035         }
4036     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
4037         if (!mc->has_hotpluggable_cpus) {
4038             error_setg(errp, "CPU hot unplug not supported on this machine");
4039             return;
4040         }
4041         spapr_core_unplug_request(hotplug_dev, dev, errp);
4042     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) {
4043         if (!smc->dr_phb_enabled) {
4044             error_setg(errp, "PHB hot unplug not supported on this machine");
4045             return;
4046         }
4047         spapr_phb_unplug_request(hotplug_dev, dev, errp);
4048     }
4049 }
4050 
4051 static void spapr_machine_device_pre_plug(HotplugHandler *hotplug_dev,
4052                                           DeviceState *dev, Error **errp)
4053 {
4054     if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
4055         spapr_memory_pre_plug(hotplug_dev, dev, errp);
4056     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
4057         spapr_core_pre_plug(hotplug_dev, dev, errp);
4058     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) {
4059         spapr_phb_pre_plug(hotplug_dev, dev, errp);
4060     }
4061 }
4062 
4063 static HotplugHandler *spapr_get_hotplug_handler(MachineState *machine,
4064                                                  DeviceState *dev)
4065 {
4066     if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM) ||
4067         object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE) ||
4068         object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) {
4069         return HOTPLUG_HANDLER(machine);
4070     }
4071     return NULL;
4072 }
4073 
4074 static CpuInstanceProperties
4075 spapr_cpu_index_to_props(MachineState *machine, unsigned cpu_index)
4076 {
4077     CPUArchId *core_slot;
4078     MachineClass *mc = MACHINE_GET_CLASS(machine);
4079 
4080     /* make sure possible_cpus are initialized */
4081     mc->possible_cpu_arch_ids(machine);
4082     /* get CPU core slot containing thread that matches cpu_index */
4083     core_slot = spapr_find_cpu_slot(machine, cpu_index, NULL);
4084     assert(core_slot);
4085     return core_slot->props;
4086 }
4087 
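/*
 * Default NUMA placement: cores are assigned to nodes in contiguous
 * groups of smp_cores, wrapping around the available nodes.
 */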
4088 static int64_t spapr_get_default_cpu_node_id(const MachineState *ms, int idx)
4089 {
4090     return idx / smp_cores % nb_numa_nodes;
4091 }
4092 
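/*
 * Build (and cache) the list of possible CPU slots: one CPUArchId per
 * core, with arch_id == core_id == slot index * smp_threads.
 */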
4093 static const CPUArchIdList *spapr_possible_cpu_arch_ids(MachineState *machine)
4094 {
4095     int i;
4096     const char *core_type;
4097     int spapr_max_cores = max_cpus / smp_threads;
4098     MachineClass *mc = MACHINE_GET_CLASS(machine);
4099 
4100     if (!mc->has_hotpluggable_cpus) {
4101         spapr_max_cores = QEMU_ALIGN_UP(smp_cpus, smp_threads) / smp_threads;
4102     }
4103     if (machine->possible_cpus) {
4104         assert(machine->possible_cpus->len == spapr_max_cores);
4105         return machine->possible_cpus;
4106     }
4107 
4108     core_type = spapr_get_cpu_core_type(machine->cpu_type);
4109     if (!core_type) {
4110         error_report("Unable to find sPAPR CPU Core definition");
4111         exit(1);
4112     }
4113 
4114     machine->possible_cpus = g_malloc0(sizeof(CPUArchIdList) +
4115                              sizeof(CPUArchId) * spapr_max_cores);
4116     machine->possible_cpus->len = spapr_max_cores;
4117     for (i = 0; i < machine->possible_cpus->len; i++) {
4118         int core_id = i * smp_threads;
4119 
4120         machine->possible_cpus->cpus[i].type = core_type;
4121         machine->possible_cpus->cpus[i].vcpus_count = smp_threads;
4122         machine->possible_cpus->cpus[i].arch_id = core_id;
4123         machine->possible_cpus->cpus[i].props.has_core_id = true;
4124         machine->possible_cpus->cpus[i].props.core_id = core_id;
4125     }
4126     return machine->possible_cpus;
4127 }
4128 
4129 static void spapr_phb_placement(SpaprMachineState *spapr, uint32_t index,
4130                                 uint64_t *buid, hwaddr *pio,
4131                                 hwaddr *mmio32, hwaddr *mmio64,
4132                                 unsigned n_dma, uint32_t *liobns, Error **errp)
4133 {
4134     /*
4135      * New-style PHB window placement.
4136      *
4137      * Goals: Gives large (1TiB), naturally aligned 64-bit MMIO window
4138      * for each PHB, in addition to 2GiB 32-bit MMIO and 64kiB PIO
4139      * windows.
4140      *
4141      * Some guest kernels can't work with MMIO windows above 1<<46
4142      * (64TiB), so we place up to 31 PHBs in the area 32TiB..64TiB
4143      *
4144      * 32TiB..(32TiB+1984kiB) contains the 64kiB PIO windows for each
4145      * PHB stacked together.  (32TiB+2GiB)..(32TiB+64GiB) contains the
4146      * 2GiB 32-bit MMIO windows for each PHB.  Then 33..64TiB has the
4147      * 1TiB 64-bit MMIO windows for each PHB.
4148      */
4149     const uint64_t base_buid = 0x800000020000000ULL;
4150     int i;
4151 
4152     /* Sanity check natural alignments */
4153     QEMU_BUILD_BUG_ON((SPAPR_PCI_BASE % SPAPR_PCI_MEM64_WIN_SIZE) != 0);
4154     QEMU_BUILD_BUG_ON((SPAPR_PCI_LIMIT % SPAPR_PCI_MEM64_WIN_SIZE) != 0);
4155     QEMU_BUILD_BUG_ON((SPAPR_PCI_MEM64_WIN_SIZE % SPAPR_PCI_MEM32_WIN_SIZE) != 0);
4156     QEMU_BUILD_BUG_ON((SPAPR_PCI_MEM32_WIN_SIZE % SPAPR_PCI_IO_WIN_SIZE) != 0);
4157     /* Sanity check bounds */
4158     QEMU_BUILD_BUG_ON((SPAPR_MAX_PHBS * SPAPR_PCI_IO_WIN_SIZE) >
4159                       SPAPR_PCI_MEM32_WIN_SIZE);
4160     QEMU_BUILD_BUG_ON((SPAPR_MAX_PHBS * SPAPR_PCI_MEM32_WIN_SIZE) >
4161                       SPAPR_PCI_MEM64_WIN_SIZE);
4162 
4163     if (index >= SPAPR_MAX_PHBS) {
4164         error_setg(errp, "\"index\" for PAPR PHB is too large (max %llu)",
4165                    SPAPR_MAX_PHBS - 1);
4166         return;
4167     }
4168 
4169     *buid = base_buid + index;
4170     for (i = 0; i < n_dma; ++i) {
4171         liobns[i] = SPAPR_PCI_LIOBN(index, i);
4172     }
4173 
4174     *pio = SPAPR_PCI_BASE + index * SPAPR_PCI_IO_WIN_SIZE;
4175     *mmio32 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM32_WIN_SIZE;
4176     *mmio64 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM64_WIN_SIZE;
4177 }
4178 
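/*
 * XICSFabric interface: resolve the machine's single ICS and the
 * per-CPU ICPs for the XICS interrupt controller code.
 */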
4179 static ICSState *spapr_ics_get(XICSFabric *dev, int irq)
4180 {
4181     SpaprMachineState *spapr = SPAPR_MACHINE(dev);
4182 
4183     return ics_valid_irq(spapr->ics, irq) ? spapr->ics : NULL;
4184 }
4185 
4186 static void spapr_ics_resend(XICSFabric *dev)
4187 {
4188     SpaprMachineState *spapr = SPAPR_MACHINE(dev);
4189 
4190     ics_resend(spapr->ics);
4191 }
4192 
4193 static ICPState *spapr_icp_get(XICSFabric *xi, int vcpu_id)
4194 {
4195     PowerPCCPU *cpu = spapr_find_cpu(vcpu_id);
4196 
4197     return cpu ? spapr_cpu_state(cpu)->icp : NULL;
4198 }
4199 
4200 static void spapr_pic_print_info(InterruptStatsProvider *obj,
4201                                  Monitor *mon)
4202 {
4203     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
4204 
4205     spapr->irq->print_info(spapr, mon);
4206 }
4207 
4208 int spapr_get_vcpu_id(PowerPCCPU *cpu)
4209 {
4210     return cpu->vcpu_id;
4211 }
4212 
4213 void spapr_set_vcpu_id(PowerPCCPU *cpu, int cpu_index, Error **errp)
4214 {
4215     SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
4216     int vcpu_id;
4217 
4218     vcpu_id = spapr_vcpu_id(spapr, cpu_index);
4219 
4220     if (kvm_enabled() && !kvm_vcpu_id_is_valid(vcpu_id)) {
4221         error_setg(errp, "Can't create CPU with id %d in KVM", vcpu_id);
4222         error_append_hint(errp, "Adjust the number of cpus to %d "
4223                           "or try to raise the number of threads per core\n",
4224                           vcpu_id * smp_threads / spapr->vsmt);
4225         return;
4226     }
4227 
4228     cpu->vcpu_id = vcpu_id;
4229 }
4230 
4231 PowerPCCPU *spapr_find_cpu(int vcpu_id)
4232 {
4233     CPUState *cs;
4234 
4235     CPU_FOREACH(cs) {
4236         PowerPCCPU *cpu = POWERPC_CPU(cs);
4237 
4238         if (spapr_get_vcpu_id(cpu) == vcpu_id) {
4239             return cpu;
4240         }
4241     }
4242 
4243     return NULL;
4244 }
4245 
4246 static void spapr_machine_class_init(ObjectClass *oc, void *data)
4247 {
4248     MachineClass *mc = MACHINE_CLASS(oc);
4249     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(oc);
4250     FWPathProviderClass *fwc = FW_PATH_PROVIDER_CLASS(oc);
4251     NMIClass *nc = NMI_CLASS(oc);
4252     HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc);
4253     PPCVirtualHypervisorClass *vhc = PPC_VIRTUAL_HYPERVISOR_CLASS(oc);
4254     XICSFabricClass *xic = XICS_FABRIC_CLASS(oc);
4255     InterruptStatsProviderClass *ispc = INTERRUPT_STATS_PROVIDER_CLASS(oc);
4256 
4257     mc->desc = "pSeries Logical Partition (PAPR compliant)";
4258     mc->ignore_boot_device_suffixes = true;
4259 
4260     /*
4261      * We set up the default / latest behaviour here.  The class_init
4262      * functions for the specific versioned machine types can override
4263      * these details for backwards compatibility
4264      */
4265     mc->init = spapr_machine_init;
4266     mc->reset = spapr_machine_reset;
4267     mc->block_default_type = IF_SCSI;
4268     mc->max_cpus = 1024;
4269     mc->no_parallel = 1;
4270     mc->default_boot_order = "";
4271     mc->default_ram_size = 512 * MiB;
4272     mc->default_display = "std";
4273     mc->kvm_type = spapr_kvm_type;
4274     machine_class_allow_dynamic_sysbus_dev(mc, TYPE_SPAPR_PCI_HOST_BRIDGE);
4275     mc->pci_allow_0_address = true;
4276     assert(!mc->get_hotplug_handler);
4277     mc->get_hotplug_handler = spapr_get_hotplug_handler;
4278     hc->pre_plug = spapr_machine_device_pre_plug;
4279     hc->plug = spapr_machine_device_plug;
4280     mc->cpu_index_to_instance_props = spapr_cpu_index_to_props;
4281     mc->get_default_cpu_node_id = spapr_get_default_cpu_node_id;
4282     mc->possible_cpu_arch_ids = spapr_possible_cpu_arch_ids;
4283     hc->unplug_request = spapr_machine_device_unplug_request;
4284     hc->unplug = spapr_machine_device_unplug;
4285 
4286     smc->dr_lmb_enabled = true;
4287     smc->update_dt_enabled = true;
4288     mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power9_v2.0");
4289     mc->has_hotpluggable_cpus = true;
4290     smc->resize_hpt_default = SPAPR_RESIZE_HPT_ENABLED;
4291     fwc->get_dev_path = spapr_get_fw_dev_path;
4292     nc->nmi_monitor_handler = spapr_nmi;
4293     smc->phb_placement = spapr_phb_placement;
4294     vhc->hypercall = emulate_spapr_hypercall;
4295     vhc->hpt_mask = spapr_hpt_mask;
4296     vhc->map_hptes = spapr_map_hptes;
4297     vhc->unmap_hptes = spapr_unmap_hptes;
4298     vhc->store_hpte = spapr_store_hpte;
4299     vhc->get_pate = spapr_get_pate;
4300     vhc->encode_hpt_for_kvm_pr = spapr_encode_hpt_for_kvm_pr;
4301     xic->ics_get = spapr_ics_get;
4302     xic->ics_resend = spapr_ics_resend;
4303     xic->icp_get = spapr_icp_get;
4304     ispc->print_info = spapr_pic_print_info;
4305     /* Force NUMA node memory size to be a multiple of
4306      * SPAPR_MEMORY_BLOCK_SIZE (256M) since that's the granularity
4307      * in which LMBs are represented and hot-added
4308      */
4309     mc->numa_mem_align_shift = 28;
4310 
4311     smc->default_caps.caps[SPAPR_CAP_HTM] = SPAPR_CAP_OFF;
4312     smc->default_caps.caps[SPAPR_CAP_VSX] = SPAPR_CAP_ON;
4313     smc->default_caps.caps[SPAPR_CAP_DFP] = SPAPR_CAP_ON;
4314     smc->default_caps.caps[SPAPR_CAP_CFPC] = SPAPR_CAP_WORKAROUND;
4315     smc->default_caps.caps[SPAPR_CAP_SBBC] = SPAPR_CAP_WORKAROUND;
4316     smc->default_caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_WORKAROUND;
4317     smc->default_caps.caps[SPAPR_CAP_HPT_MAXPAGESIZE] = 16; /* 64kiB */
4318     smc->default_caps.caps[SPAPR_CAP_NESTED_KVM_HV] = SPAPR_CAP_OFF;
4319     smc->default_caps.caps[SPAPR_CAP_LARGE_DECREMENTER] = SPAPR_CAP_ON;
4320     smc->default_caps.caps[SPAPR_CAP_CCF_ASSIST] = SPAPR_CAP_OFF;
4321     spapr_caps_add_properties(smc, &error_abort);
4322     smc->irq = &spapr_irq_xics;
4323     smc->dr_phb_enabled = true;
4324 }
4325 
4326 static const TypeInfo spapr_machine_info = {
4327     .name          = TYPE_SPAPR_MACHINE,
4328     .parent        = TYPE_MACHINE,
4329     .abstract      = true,
4330     .instance_size = sizeof(SpaprMachineState),
4331     .instance_init = spapr_instance_init,
4332     .instance_finalize = spapr_machine_finalizefn,
4333     .class_size    = sizeof(SpaprMachineClass),
4334     .class_init    = spapr_machine_class_init,
4335     .interfaces = (InterfaceInfo[]) {
4336         { TYPE_FW_PATH_PROVIDER },
4337         { TYPE_NMI },
4338         { TYPE_HOTPLUG_HANDLER },
4339         { TYPE_PPC_VIRTUAL_HYPERVISOR },
4340         { TYPE_XICS_FABRIC },
4341         { TYPE_INTERRUPT_STATS_PROVIDER },
4342         { }
4343     },
4344 };
4345 
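/*
 * Boilerplate for registering a versioned pseries machine type.  The
 * machine marked "latest" also gets the "pseries" alias and becomes
 * the default machine.
 */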
4346 #define DEFINE_SPAPR_MACHINE(suffix, verstr, latest)                 \
4347     static void spapr_machine_##suffix##_class_init(ObjectClass *oc, \
4348                                                     void *data)      \
4349     {                                                                \
4350         MachineClass *mc = MACHINE_CLASS(oc);                        \
4351         spapr_machine_##suffix##_class_options(mc);                  \
4352         if (latest) {                                                \
4353             mc->alias = "pseries";                                   \
4354             mc->is_default = 1;                                      \
4355         }                                                            \
4356     }                                                                \
4357     static const TypeInfo spapr_machine_##suffix##_info = {          \
4358         .name = MACHINE_TYPE_NAME("pseries-" verstr),                \
4359         .parent = TYPE_SPAPR_MACHINE,                                \
4360         .class_init = spapr_machine_##suffix##_class_init,           \
4361     };                                                               \
4362     static void spapr_machine_register_##suffix(void)                \
4363     {                                                                \
4364         type_register(&spapr_machine_##suffix##_info);               \
4365     }                                                                \
4366     type_init(spapr_machine_register_##suffix)
4367 
4368 /*
4369  * pseries-4.0
4370  */
4371 static void spapr_machine_4_0_class_options(MachineClass *mc)
4372 {
4373     /* Defaults for the latest behaviour inherited from the base class */
4374 }
4375 
4376 DEFINE_SPAPR_MACHINE(4_0, "4.0", true);
4377 
4378 /*
4379  * pseries-3.1
4380  */
4381 static void spapr_machine_3_1_class_options(MachineClass *mc)
4382 {
4383     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
4384     static GlobalProperty compat[] = {
4385         { TYPE_SPAPR_MACHINE, "host-model", "passthrough" },
4386         { TYPE_SPAPR_MACHINE, "host-serial", "passthrough" },
4387     };
4388 
4389     spapr_machine_4_0_class_options(mc);
4390     compat_props_add(mc->compat_props, hw_compat_3_1, hw_compat_3_1_len);
4391     compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
4392 
4393     mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power8_v2.0");
4394     smc->update_dt_enabled = false;
4395     smc->dr_phb_enabled = false;
4396     smc->default_caps.caps[SPAPR_CAP_CFPC] = SPAPR_CAP_BROKEN;
4397     smc->default_caps.caps[SPAPR_CAP_SBBC] = SPAPR_CAP_BROKEN;
4398     smc->default_caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_BROKEN;
4399     smc->default_caps.caps[SPAPR_CAP_LARGE_DECREMENTER] = SPAPR_CAP_OFF;
4400 }
4401 
4402 DEFINE_SPAPR_MACHINE(3_1, "3.1", false);
4403 
4404 /*
4405  * pseries-3.0
4406  */
4407 
4408 static void spapr_machine_3_0_class_options(MachineClass *mc)
4409 {
4410     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
4411 
4412     spapr_machine_3_1_class_options(mc);
4413     compat_props_add(mc->compat_props, hw_compat_3_0, hw_compat_3_0_len);
4414 
4415     smc->legacy_irq_allocation = true;
4416     smc->irq = &spapr_irq_xics_legacy;
4417 }
4418 
4419 DEFINE_SPAPR_MACHINE(3_0, "3.0", false);
4420 
4421 /*
4422  * pseries-2.12
4423  */
4424 static void spapr_machine_2_12_class_options(MachineClass *mc)
4425 {
4426     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
4427     static GlobalProperty compat[] = {
4428         { TYPE_POWERPC_CPU, "pre-3.0-migration", "on" },
4429         { TYPE_SPAPR_CPU_CORE, "pre-3.0-migration", "on" },
4430     };
4431 
4432     spapr_machine_3_0_class_options(mc);
4433     compat_props_add(mc->compat_props, hw_compat_2_12, hw_compat_2_12_len);
4434     compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
4435 
4436     /* We depend on kvm_enabled() to choose a default value for the
4437      * hpt-max-page-size capability. Of course we can't do it here
4438      * because this is too early and the HW accelerator isn't initialized
4439      * yet. Postpone this to machine init (see default_caps_with_cpu()).
4440      */
4441     smc->default_caps.caps[SPAPR_CAP_HPT_MAXPAGESIZE] = 0;
4442 }
4443 
4444 DEFINE_SPAPR_MACHINE(2_12, "2.12", false);
4445 
4446 static void spapr_machine_2_12_sxxm_class_options(MachineClass *mc)
4447 {
4448     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
4449 
4450     spapr_machine_2_12_class_options(mc);
4451     smc->default_caps.caps[SPAPR_CAP_CFPC] = SPAPR_CAP_WORKAROUND;
4452     smc->default_caps.caps[SPAPR_CAP_SBBC] = SPAPR_CAP_WORKAROUND;
4453     smc->default_caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_FIXED_CCD;
4454 }
4455 
4456 DEFINE_SPAPR_MACHINE(2_12_sxxm, "2.12-sxxm", false);
4457 
4458 /*
4459  * pseries-2.11
4460  */
4461 
4462 static void spapr_machine_2_11_class_options(MachineClass *mc)
4463 {
4464     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
4465 
4466     spapr_machine_2_12_class_options(mc);
4467     smc->default_caps.caps[SPAPR_CAP_HTM] = SPAPR_CAP_ON;
4468     compat_props_add(mc->compat_props, hw_compat_2_11, hw_compat_2_11_len);
4469 }
4470 
4471 DEFINE_SPAPR_MACHINE(2_11, "2.11", false);
4472 
4473 /*
4474  * pseries-2.10
4475  */
4476 
4477 static void spapr_machine_2_10_class_options(MachineClass *mc)
4478 {
4479     spapr_machine_2_11_class_options(mc);
4480     compat_props_add(mc->compat_props, hw_compat_2_10, hw_compat_2_10_len);
4481 }
4482 
4483 DEFINE_SPAPR_MACHINE(2_10, "2.10", false);
4484 
4485 /*
4486  * pseries-2.9
4487  */
4488 
4489 static void spapr_machine_2_9_class_options(MachineClass *mc)
4490 {
4491     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
4492     static GlobalProperty compat[] = {
4493         { TYPE_POWERPC_CPU, "pre-2.10-migration", "on" },
4494     };
4495 
4496     spapr_machine_2_10_class_options(mc);
4497     compat_props_add(mc->compat_props, hw_compat_2_9, hw_compat_2_9_len);
4498     compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
4499     mc->numa_auto_assign_ram = numa_legacy_auto_assign_ram;
4500     smc->pre_2_10_has_unused_icps = true;
4501     smc->resize_hpt_default = SPAPR_RESIZE_HPT_DISABLED;
4502 }
4503 
4504 DEFINE_SPAPR_MACHINE(2_9, "2.9", false);
4505 
4506 /*
4507  * pseries-2.8
4508  */
4509 
4510 static void spapr_machine_2_8_class_options(MachineClass *mc)
4511 {
4512     static GlobalProperty compat[] = {
4513         { TYPE_SPAPR_PCI_HOST_BRIDGE, "pcie-extended-configuration-space", "off" },
4514     };
4515 
4516     spapr_machine_2_9_class_options(mc);
4517     compat_props_add(mc->compat_props, hw_compat_2_8, hw_compat_2_8_len);
4518     compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
4519     mc->numa_mem_align_shift = 23;
4520 }
4521 
4522 DEFINE_SPAPR_MACHINE(2_8, "2.8", false);
4523 
4524 /*
4525  * pseries-2.7
4526  */
4527 
4528 static void phb_placement_2_7(SpaprMachineState *spapr, uint32_t index,
4529                               uint64_t *buid, hwaddr *pio,
4530                               hwaddr *mmio32, hwaddr *mmio64,
4531                               unsigned n_dma, uint32_t *liobns, Error **errp)
4532 {
4533     /* Legacy PHB placement for pseries-2.7 and earlier machine types */
4534     const uint64_t base_buid = 0x800000020000000ULL;
4535     const hwaddr phb_spacing = 0x1000000000ULL; /* 64 GiB */
4536     const hwaddr mmio_offset = 0xa0000000; /* 2 GiB + 512 MiB */
4537     const hwaddr pio_offset = 0x80000000; /* 2 GiB */
4538     const uint32_t max_index = 255;
4539     const hwaddr phb0_alignment = 0x10000000000ULL; /* 1 TiB */
4540 
4541     uint64_t ram_top = MACHINE(spapr)->ram_size;
4542     hwaddr phb0_base, phb_base;
4543     int i;
4544 
4545     /* Do we have device memory? */
4546     if (MACHINE(spapr)->maxram_size > ram_top) {
4547         /* Can't just use maxram_size, because there may be an
4548          * alignment gap between normal and device memory regions
4549          */
4550         ram_top = MACHINE(spapr)->device_memory->base +
4551             memory_region_size(&MACHINE(spapr)->device_memory->mr);
4552     }
4553 
4554     phb0_base = QEMU_ALIGN_UP(ram_top, phb0_alignment);
4555 
4556     if (index > max_index) {
4557         error_setg(errp, "\"index\" for PAPR PHB is too large (max %u)",
4558                    max_index);
4559         return;
4560     }
4561 
4562     *buid = base_buid + index;
4563     for (i = 0; i < n_dma; ++i) {
4564         liobns[i] = SPAPR_PCI_LIOBN(index, i);
4565     }
4566 
4567     phb_base = phb0_base + index * phb_spacing;
4568     *pio = phb_base + pio_offset;
4569     *mmio32 = phb_base + mmio_offset;
4570     /*
4571      * We don't set the 64-bit MMIO window, relying on the PHB's
4572      * fallback behaviour of automatically splitting a large "32-bit"
4573      * window into contiguous 32-bit and 64-bit windows
4574      */
4575 }
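
/*
 * Worked example of the placement above, assuming a guest with 4 GiB
 * of RAM and no device memory: ram_top = 4 GiB, so phb0_base =
 * align-up(4 GiB, 1 TiB) = 1 TiB.  PHB index 0 then gets PIO at
 * 1 TiB + 2 GiB and 32-bit MMIO at 1 TiB + 2.5 GiB, and each further
 * index shifts the whole window up by 64 GiB (phb_spacing).
 */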
4576 
4577 static void spapr_machine_2_7_class_options(MachineClass *mc)
4578 {
4579     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
4580     static GlobalProperty compat[] = {
4581         { TYPE_SPAPR_PCI_HOST_BRIDGE, "mem_win_size", "0xf80000000", },
4582         { TYPE_SPAPR_PCI_HOST_BRIDGE, "mem64_win_size", "0", },
4583         { TYPE_POWERPC_CPU, "pre-2.8-migration", "on", },
4584         { TYPE_SPAPR_PCI_HOST_BRIDGE, "pre-2.8-migration", "on", },
4585     };
4586 
4587     spapr_machine_2_8_class_options(mc);
4588     mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power7_v2.3");
4589     mc->default_machine_opts = "modern-hotplug-events=off";
4590     compat_props_add(mc->compat_props, hw_compat_2_7, hw_compat_2_7_len);
4591     compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
4592     smc->phb_placement = phb_placement_2_7;
4593 }
4594 
4595 DEFINE_SPAPR_MACHINE(2_7, "2.7", false);
4596 
4597 /*
4598  * pseries-2.6
4599  */
4600 
4601 static void spapr_machine_2_6_class_options(MachineClass *mc)
4602 {
4603     static GlobalProperty compat[] = {
4604         { TYPE_SPAPR_PCI_HOST_BRIDGE, "ddw", "off" },
4605     };
4606 
4607     spapr_machine_2_7_class_options(mc);
4608     mc->has_hotpluggable_cpus = false;
4609     compat_props_add(mc->compat_props, hw_compat_2_6, hw_compat_2_6_len);
4610     compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
4611 }
4612 
4613 DEFINE_SPAPR_MACHINE(2_6, "2.6", false);
4614 
4615 /*
4616  * pseries-2.5
4617  */
4618 
4619 static void spapr_machine_2_5_class_options(MachineClass *mc)
4620 {
4621     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
4622     static GlobalProperty compat[] = {
4623         { "spapr-vlan", "use-rx-buffer-pools", "off" },
4624     };
4625 
4626     spapr_machine_2_6_class_options(mc);
4627     smc->use_ohci_by_default = true;
4628     compat_props_add(mc->compat_props, hw_compat_2_5, hw_compat_2_5_len);
4629     compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
4630 }
4631 
4632 DEFINE_SPAPR_MACHINE(2_5, "2.5", false);
4633 
4634 /*
4635  * pseries-2.4
4636  */
4637 
4638 static void spapr_machine_2_4_class_options(MachineClass *mc)
4639 {
4640     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
4641 
4642     spapr_machine_2_5_class_options(mc);
4643     smc->dr_lmb_enabled = false;
4644     compat_props_add(mc->compat_props, hw_compat_2_4, hw_compat_2_4_len);
4645 }
4646 
4647 DEFINE_SPAPR_MACHINE(2_4, "2.4", false);
4648 
4649 /*
4650  * pseries-2.3
4651  */
4652 
4653 static void spapr_machine_2_3_class_options(MachineClass *mc)
4654 {
4655     static GlobalProperty compat[] = {
4656         { "spapr-pci-host-bridge", "dynamic-reconfiguration", "off" },
4657     };
4658     spapr_machine_2_4_class_options(mc);
4659     compat_props_add(mc->compat_props, hw_compat_2_3, hw_compat_2_3_len);
4660     compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
4661 }
4662 DEFINE_SPAPR_MACHINE(2_3, "2.3", false);
4663 
4664 /*
4665  * pseries-2.2
4666  */
4667 
4668 static void spapr_machine_2_2_class_options(MachineClass *mc)
4669 {
4670     static GlobalProperty compat[] = {
4671         { TYPE_SPAPR_PCI_HOST_BRIDGE, "mem_win_size", "0x20000000" },
4672     };
4673 
4674     spapr_machine_2_3_class_options(mc);
4675     compat_props_add(mc->compat_props, hw_compat_2_2, hw_compat_2_2_len);
4676     compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
4677     mc->default_machine_opts = "modern-hotplug-events=off,suppress-vmdesc=on";
4678 }
4679 DEFINE_SPAPR_MACHINE(2_2, "2.2", false);
4680 
4681 /*
4682  * pseries-2.1
4683  */
4684 
4685 static void spapr_machine_2_1_class_options(MachineClass *mc)
4686 {
4687     spapr_machine_2_2_class_options(mc);
4688     compat_props_add(mc->compat_props, hw_compat_2_1, hw_compat_2_1_len);
4689 }
4690 DEFINE_SPAPR_MACHINE(2_1, "2.1", false);
4691 
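/*
 * Only the abstract TYPE_SPAPR_MACHINE base is registered here; each
 * versioned pseries machine above is registered by the type_init()
 * that the DEFINE_SPAPR_MACHINE() macro expands to.
 */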
4692 static void spapr_machine_register_types(void)
4693 {
4694     type_register_static(&spapr_machine_info);
4695 }
4696 
4697 type_init(spapr_machine_register_types)
4698