xref: /openbmc/qemu/hw/ppc/spapr.c (revision bac4711b)
1 /*
2  * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
3  *
4  * Copyright (c) 2004-2007 Fabrice Bellard
5  * Copyright (c) 2007 Jocelyn Mayer
6  * Copyright (c) 2010 David Gibson, IBM Corporation.
7  *
8  * Permission is hereby granted, free of charge, to any person obtaining a copy
9  * of this software and associated documentation files (the "Software"), to deal
10  * in the Software without restriction, including without limitation the rights
11  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12  * copies of the Software, and to permit persons to whom the Software is
13  * furnished to do so, subject to the following conditions:
14  *
15  * The above copyright notice and this permission notice shall be included in
16  * all copies or substantial portions of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
24  * THE SOFTWARE.
25  */
26 
27 #include "qemu/osdep.h"
28 #include "qemu/datadir.h"
29 #include "qemu/memalign.h"
30 #include "qemu/guest-random.h"
31 #include "qapi/error.h"
32 #include "qapi/qapi-events-machine.h"
33 #include "qapi/qapi-events-qdev.h"
34 #include "qapi/visitor.h"
35 #include "sysemu/sysemu.h"
36 #include "sysemu/hostmem.h"
37 #include "sysemu/numa.h"
38 #include "sysemu/qtest.h"
39 #include "sysemu/reset.h"
40 #include "sysemu/runstate.h"
41 #include "qemu/log.h"
42 #include "hw/fw-path-provider.h"
43 #include "elf.h"
44 #include "net/net.h"
45 #include "sysemu/device_tree.h"
46 #include "sysemu/cpus.h"
47 #include "sysemu/hw_accel.h"
48 #include "kvm_ppc.h"
49 #include "migration/misc.h"
50 #include "migration/qemu-file-types.h"
51 #include "migration/global_state.h"
52 #include "migration/register.h"
53 #include "migration/blocker.h"
54 #include "mmu-hash64.h"
55 #include "mmu-book3s-v3.h"
56 #include "cpu-models.h"
57 #include "hw/core/cpu.h"
58 
59 #include "hw/ppc/ppc.h"
60 #include "hw/loader.h"
61 
62 #include "hw/ppc/fdt.h"
63 #include "hw/ppc/spapr.h"
64 #include "hw/ppc/spapr_nested.h"
65 #include "hw/ppc/spapr_vio.h"
66 #include "hw/ppc/vof.h"
67 #include "hw/qdev-properties.h"
68 #include "hw/pci-host/spapr.h"
69 #include "hw/pci/msi.h"
70 
71 #include "hw/pci/pci.h"
72 #include "hw/scsi/scsi.h"
73 #include "hw/virtio/virtio-scsi.h"
74 #include "hw/virtio/vhost-scsi-common.h"
75 
76 #include "exec/ram_addr.h"
77 #include "hw/usb.h"
78 #include "qemu/config-file.h"
79 #include "qemu/error-report.h"
80 #include "trace.h"
81 #include "hw/nmi.h"
82 #include "hw/intc/intc.h"
83 
84 #include "hw/ppc/spapr_cpu_core.h"
85 #include "hw/mem/memory-device.h"
86 #include "hw/ppc/spapr_tpm_proxy.h"
87 #include "hw/ppc/spapr_nvdimm.h"
88 #include "hw/ppc/spapr_numa.h"
89 #include "hw/ppc/pef.h"
90 
91 #include "monitor/monitor.h"
92 
93 #include <libfdt.h>
94 
95 /* SLOF memory layout:
96  *
97  * The SLOF raw image is loaded at 0; it copies its romfs right below the
98  * flat device tree, then positions SLOF itself 31M below that.
99  *
100  * So we set FW_OVERHEAD to 40MB, which should account for all of that
101  * and more.
102  *
103  * We load our kernel at 4M, leaving space for the SLOF initial image.
104  */
105 #define FDT_MAX_ADDR            0x80000000 /* FDT must stay below that */
106 #define FW_MAX_SIZE             0x400000
107 #define FW_FILE_NAME            "slof.bin"
108 #define FW_FILE_NAME_VOF        "vof.bin"
109 #define FW_OVERHEAD             0x2800000
110 #define KERNEL_LOAD_ADDR        FW_MAX_SIZE
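/*
 * Editorial note, derived from the constants above: FW_MAX_SIZE is
 * 0x400000 (4 MiB), so the kernel is loaded at 4 MiB; FW_OVERHEAD is
 * 0x2800000 (40 MiB), covering the SLOF image, its romfs copy and the
 * 31M gap below the flat device tree.
 */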
111 
112 #define MIN_RMA_SLOF            (128 * MiB)
113 
114 #define PHANDLE_INTC            0x00001111
115 
116 /* These two functions implement the VCPU id numbering: one to compute them
117  * all and one to identify thread 0 of a VCORE. Any change to the first one
118  * is likely to have an impact on the second one, so let's keep them close.
119  */
120 static int spapr_vcpu_id(SpaprMachineState *spapr, int cpu_index)
121 {
122     MachineState *ms = MACHINE(spapr);
123     unsigned int smp_threads = ms->smp.threads;
124 
125     assert(spapr->vsmt);
126     return
127         (cpu_index / smp_threads) * spapr->vsmt + cpu_index % smp_threads;
128 }
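/*
 * Illustrative example (not from the original source): with
 * ms->smp.threads = 4 and spapr->vsmt = 8, cpu_index 5 (thread 1 of
 * core 1) maps to VCPU id 1 * 8 + 1 = 9, i.e. each core's threads
 * occupy the first smp_threads slots of a vsmt-sized block of ids.
 */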
129 static bool spapr_is_thread0_in_vcore(SpaprMachineState *spapr,
130                                       PowerPCCPU *cpu)
131 {
132     assert(spapr->vsmt);
133     return spapr_get_vcpu_id(cpu) % spapr->vsmt == 0;
134 }
135 
136 static bool pre_2_10_vmstate_dummy_icp_needed(void *opaque)
137 {
138     /* Dummy entries correspond to unused ICPState objects in older QEMUs,
139      * and newer QEMUs don't even have them. In both cases, we don't want
140      * to send anything on the wire.
141      */
142     return false;
143 }
144 
145 static const VMStateDescription pre_2_10_vmstate_dummy_icp = {
146     .name = "icp/server",
147     .version_id = 1,
148     .minimum_version_id = 1,
149     .needed = pre_2_10_vmstate_dummy_icp_needed,
150     .fields = (VMStateField[]) {
151         VMSTATE_UNUSED(4), /* uint32_t xirr */
152         VMSTATE_UNUSED(1), /* uint8_t pending_priority */
153         VMSTATE_UNUSED(1), /* uint8_t mfrr */
154         VMSTATE_END_OF_LIST()
155     },
156 };
157 
158 static void pre_2_10_vmstate_register_dummy_icp(int i)
159 {
160     vmstate_register(NULL, i, &pre_2_10_vmstate_dummy_icp,
161                      (void *)(uintptr_t) i);
162 }
163 
164 static void pre_2_10_vmstate_unregister_dummy_icp(int i)
165 {
166     vmstate_unregister(NULL, &pre_2_10_vmstate_dummy_icp,
167                        (void *)(uintptr_t) i);
168 }
169 
170 int spapr_max_server_number(SpaprMachineState *spapr)
171 {
172     MachineState *ms = MACHINE(spapr);
173 
174     assert(spapr->vsmt);
175     return DIV_ROUND_UP(ms->smp.max_cpus * spapr->vsmt, ms->smp.threads);
176 }
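/*
 * Illustrative example (not from the original source): with
 * max_cpus = 16, threads = 4 and vsmt = 8, the interrupt controller
 * must provision DIV_ROUND_UP(16 * 8, 4) = 32 servers.
 */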
177 
178 static int spapr_fixup_cpu_smt_dt(void *fdt, int offset, PowerPCCPU *cpu,
179                                   int smt_threads)
180 {
181     int i, ret = 0;
182     g_autofree uint32_t *servers_prop = g_new(uint32_t, smt_threads);
183     g_autofree uint32_t *gservers_prop = g_new(uint32_t, smt_threads * 2);
184     int index = spapr_get_vcpu_id(cpu);
185 
186     if (cpu->compat_pvr) {
187         ret = fdt_setprop_cell(fdt, offset, "cpu-version", cpu->compat_pvr);
188         if (ret < 0) {
189             return ret;
190         }
191     }
192 
193     /* Build interrupt servers and gservers properties */
194     for (i = 0; i < smt_threads; i++) {
195         servers_prop[i] = cpu_to_be32(index + i);
196         /* Hack, direct the group queues back to cpu 0 */
197         gservers_prop[i*2] = cpu_to_be32(index + i);
198         gservers_prop[i*2 + 1] = 0;
199     }
200     ret = fdt_setprop(fdt, offset, "ibm,ppc-interrupt-server#s",
201                       servers_prop, sizeof(*servers_prop) * smt_threads);
202     if (ret < 0) {
203         return ret;
204     }
205     ret = fdt_setprop(fdt, offset, "ibm,ppc-interrupt-gserver#s",
206                       gservers_prop, sizeof(*gservers_prop) * smt_threads * 2);
207 
208     return ret;
209 }
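/*
 * Illustrative layout (not from the original source): for a core whose
 * first thread has VCPU id 8 and smt_threads = 4,
 * "ibm,ppc-interrupt-server#s" holds 8..11 and
 * "ibm,ppc-interrupt-gserver#s" holds the pairs (8,0) (9,0) (10,0) (11,0).
 */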
210 
211 static void spapr_dt_pa_features(SpaprMachineState *spapr,
212                                  PowerPCCPU *cpu,
213                                  void *fdt, int offset)
214 {
215     uint8_t pa_features_206[] = { 6, 0,
216         0xf6, 0x1f, 0xc7, 0x00, 0x80, 0xc0 };
217     uint8_t pa_features_207[] = { 24, 0,
218         0xf6, 0x1f, 0xc7, 0xc0, 0x80, 0xf0,
219         0x80, 0x00, 0x00, 0x00, 0x00, 0x00,
220         0x00, 0x00, 0x00, 0x00, 0x80, 0x00,
221         0x80, 0x00, 0x80, 0x00, 0x00, 0x00 };
222     uint8_t pa_features_300[] = { 66, 0,
223         /* 0: MMU|FPU|SLB|RUN|DABR|NX, 1: fri[nzpm]|DABRX|SPRG3|SLB0|PP110 */
224         /* 2: VPM|DS205|PPR|DS202|DS206, 3: LSD|URG, SSO, 5: LE|CFAR|EB|LSQ */
225         0xf6, 0x1f, 0xc7, 0xc0, 0x80, 0xf0, /* 0 - 5 */
226         /* 6: DS207 */
227         0x80, 0x00, 0x00, 0x00, 0x00, 0x00, /* 6 - 11 */
228         /* 16: Vector */
229         0x00, 0x00, 0x00, 0x00, 0x80, 0x00, /* 12 - 17 */
230         /* 18: Vec. Scalar, 20: Vec. XOR, 22: HTM */
231         0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 18 - 23 */
232         /* 24: Ext. Dec, 26: 64 bit ftrs, 28: PM ftrs */
233         0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 24 - 29 */
234         /* 30: MMR, 32: LE atomic, 34: EBB + ext EBB */
235         0x80, 0x00, 0x80, 0x00, 0xC0, 0x00, /* 30 - 35 */
236         /* 36: SPR SO, 38: Copy/Paste, 40: Radix MMU */
237         0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 36 - 41 */
238         /* 42: PM, 44: PC RA, 46: SC vec'd */
239         0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 42 - 47 */
240         /* 48: SIMD, 50: QP BFP, 52: String */
241         0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 48 - 53 */
242         /* 54: DecFP, 56: DecI, 58: SHA */
243         0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 54 - 59 */
244         /* 60: NM atomic, 62: RNG */
245         0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 60 - 65 */
246     };
247     uint8_t *pa_features = NULL;
248     size_t pa_size;
249 
250     if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_2_06, 0, cpu->compat_pvr)) {
251         pa_features = pa_features_206;
252         pa_size = sizeof(pa_features_206);
253     }
254     if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_2_07, 0, cpu->compat_pvr)) {
255         pa_features = pa_features_207;
256         pa_size = sizeof(pa_features_207);
257     }
258     if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_00, 0, cpu->compat_pvr)) {
259         pa_features = pa_features_300;
260         pa_size = sizeof(pa_features_300);
261     }
262     if (!pa_features) {
263         return;
264     }
265 
266     if (ppc_hash64_has(cpu, PPC_HASH64_CI_LARGEPAGE)) {
267         /*
268          * Note: we keep CI large pages off by default because a 64K-capable
269          * guest provisioned with large pages might otherwise try to map a QEMU
270          * framebuffer (or another kind of memory-mapped PCI BAR) using 64K pages
271          * even if that QEMU runs on a 4K host.
272          * We add this bit back here if we are confident this is not an issue.
273          */
274         pa_features[3] |= 0x20;
275     }
276     if ((spapr_get_cap(spapr, SPAPR_CAP_HTM) != 0) && pa_size > 24) {
277         pa_features[24] |= 0x80;    /* Transactional memory support */
278     }
279     if (spapr->cas_pre_isa3_guest && pa_size > 40) {
280         /* Workaround for broken kernels that attempt (guest) radix
281          * mode, which they can't handle, whenever they see the radix bit
282          * set in pa-features. So hide it from them. */
283         pa_features[40 + 2] &= ~0x80; /* Radix MMU */
284     }
285 
286     _FDT((fdt_setprop(fdt, offset, "ibm,pa-features", pa_features, pa_size)));
287 }
288 
289 static hwaddr spapr_node0_size(MachineState *machine)
290 {
291     if (machine->numa_state->num_nodes) {
292         int i;
293         for (i = 0; i < machine->numa_state->num_nodes; ++i) {
294             if (machine->numa_state->nodes[i].node_mem) {
295                 return MIN(pow2floor(machine->numa_state->nodes[i].node_mem),
296                            machine->ram_size);
297             }
298         }
299     }
300     return machine->ram_size;
301 }
302 
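/*
 * Editorial note: copying strlen(s1) + 1 bytes deliberately includes the
 * trailing '\0', so the GString accumulates a sequence of NUL-terminated
 * strings -- the wire format of properties such as
 * "ibm,hypertas-functions" built below.
 */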
303 static void add_str(GString *s, const gchar *s1)
304 {
305     g_string_append_len(s, s1, strlen(s1) + 1);
306 }
307 
308 static int spapr_dt_memory_node(SpaprMachineState *spapr, void *fdt, int nodeid,
309                                 hwaddr start, hwaddr size)
310 {
311     char mem_name[32];
312     uint64_t mem_reg_property[2];
313     int off;
314 
315     mem_reg_property[0] = cpu_to_be64(start);
316     mem_reg_property[1] = cpu_to_be64(size);
317 
318     sprintf(mem_name, "memory@%" HWADDR_PRIx, start);
319     off = fdt_add_subnode(fdt, 0, mem_name);
320     _FDT(off);
321     _FDT((fdt_setprop_string(fdt, off, "device_type", "memory")));
322     _FDT((fdt_setprop(fdt, off, "reg", mem_reg_property,
323                       sizeof(mem_reg_property))));
324     spapr_numa_write_associativity_dt(spapr, fdt, off, nodeid);
325     return off;
326 }
327 
328 static uint32_t spapr_pc_dimm_node(MemoryDeviceInfoList *list, ram_addr_t addr)
329 {
330     MemoryDeviceInfoList *info;
331 
332     for (info = list; info; info = info->next) {
333         MemoryDeviceInfo *value = info->value;
334 
335         if (value && value->type == MEMORY_DEVICE_INFO_KIND_DIMM) {
336             PCDIMMDeviceInfo *pcdimm_info = value->u.dimm.data;
337 
338             if (addr >= pcdimm_info->addr &&
339                 addr < (pcdimm_info->addr + pcdimm_info->size)) {
340                 return pcdimm_info->node;
341             }
342         }
343     }
344 
345     return -1;
346 }
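/*
 * Editorial note: the -1 above becomes 0xffffffff in the uint32_t return
 * type; the callers store it verbatim, which is how "no NUMA node" is
 * encoded in the dynamic memory properties.
 */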
347 
348 struct sPAPRDrconfCellV2 {
349      uint32_t seq_lmbs;
350      uint64_t base_addr;
351      uint32_t drc_index;
352      uint32_t aa_index;
353      uint32_t flags;
354 } QEMU_PACKED;
355 
356 typedef struct DrconfCellQueue {
357     struct sPAPRDrconfCellV2 cell;
358     QSIMPLEQ_ENTRY(DrconfCellQueue) entry;
359 } DrconfCellQueue;
360 
361 static DrconfCellQueue *
362 spapr_get_drconf_cell(uint32_t seq_lmbs, uint64_t base_addr,
363                       uint32_t drc_index, uint32_t aa_index,
364                       uint32_t flags)
365 {
366     DrconfCellQueue *elem;
367 
368     elem = g_malloc0(sizeof(*elem));
369     elem->cell.seq_lmbs = cpu_to_be32(seq_lmbs);
370     elem->cell.base_addr = cpu_to_be64(base_addr);
371     elem->cell.drc_index = cpu_to_be32(drc_index);
372     elem->cell.aa_index = cpu_to_be32(aa_index);
373     elem->cell.flags = cpu_to_be32(flags);
374 
375     return elem;
376 }
377 
378 static int spapr_dt_dynamic_memory_v2(SpaprMachineState *spapr, void *fdt,
379                                       int offset, MemoryDeviceInfoList *dimms)
380 {
381     MachineState *machine = MACHINE(spapr);
382     uint8_t *int_buf, *cur_index;
383     int ret;
384     uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE;
385     uint64_t addr, cur_addr, size;
386     uint32_t nr_boot_lmbs = (machine->device_memory->base / lmb_size);
387     uint64_t mem_end = machine->device_memory->base +
388                        memory_region_size(&machine->device_memory->mr);
389     uint32_t node, buf_len, nr_entries = 0;
390     SpaprDrc *drc;
391     DrconfCellQueue *elem, *next;
392     MemoryDeviceInfoList *info;
393     QSIMPLEQ_HEAD(, DrconfCellQueue) drconf_queue
394         = QSIMPLEQ_HEAD_INITIALIZER(drconf_queue);
395 
396     /* Entry to cover RAM and the gap area */
397     elem = spapr_get_drconf_cell(nr_boot_lmbs, 0, 0, -1,
398                                  SPAPR_LMB_FLAGS_RESERVED |
399                                  SPAPR_LMB_FLAGS_DRC_INVALID);
400     QSIMPLEQ_INSERT_TAIL(&drconf_queue, elem, entry);
401     nr_entries++;
402 
403     cur_addr = machine->device_memory->base;
404     for (info = dimms; info; info = info->next) {
405         PCDIMMDeviceInfo *di = info->value->u.dimm.data;
406 
407         addr = di->addr;
408         size = di->size;
409         node = di->node;
410 
411         /*
412          * The NVDIMM area only becomes hotpluggable once the NVDIMM is
413          * unplugged, so skip it here; the next iteration marks the bigger
414          * chunk that includes the NVDIMM-occupied area as hotpluggable.
415          */
416         if (info->value->type == MEMORY_DEVICE_INFO_KIND_NVDIMM) {
417             continue;
        }
418 
419         /* Entry for hot-pluggable area */
420         if (cur_addr < addr) {
421             drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, cur_addr / lmb_size);
422             g_assert(drc);
423             elem = spapr_get_drconf_cell((addr - cur_addr) / lmb_size,
424                                          cur_addr, spapr_drc_index(drc), -1, 0);
425             QSIMPLEQ_INSERT_TAIL(&drconf_queue, elem, entry);
426             nr_entries++;
427         }
428 
429         /* Entry for DIMM */
430         drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, addr / lmb_size);
431         g_assert(drc);
432         elem = spapr_get_drconf_cell(size / lmb_size, addr,
433                                      spapr_drc_index(drc), node,
434                                      (SPAPR_LMB_FLAGS_ASSIGNED |
435                                       SPAPR_LMB_FLAGS_HOTREMOVABLE));
436         QSIMPLEQ_INSERT_TAIL(&drconf_queue, elem, entry);
437         nr_entries++;
438         cur_addr = addr + size;
439     }
440 
441     /* Entry for remaining hotpluggable area */
442     if (cur_addr < mem_end) {
443         drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, cur_addr / lmb_size);
444         g_assert(drc);
445         elem = spapr_get_drconf_cell((mem_end - cur_addr) / lmb_size,
446                                      cur_addr, spapr_drc_index(drc), -1, 0);
447         QSIMPLEQ_INSERT_TAIL(&drconf_queue, elem, entry);
448         nr_entries++;
449     }
450 
451     buf_len = nr_entries * sizeof(struct sPAPRDrconfCellV2) + sizeof(uint32_t);
452     int_buf = cur_index = g_malloc0(buf_len);
453     *(uint32_t *)int_buf = cpu_to_be32(nr_entries);
454     cur_index += sizeof(nr_entries);
455 
456     QSIMPLEQ_FOREACH_SAFE(elem, &drconf_queue, entry, next) {
457         memcpy(cur_index, &elem->cell, sizeof(elem->cell));
458         cur_index += sizeof(elem->cell);
459         QSIMPLEQ_REMOVE(&drconf_queue, elem, DrconfCellQueue, entry);
460         g_free(elem);
461     }
462 
463     ret = fdt_setprop(fdt, offset, "ibm,dynamic-memory-v2", int_buf, buf_len);
464     g_free(int_buf);
465     if (ret < 0) {
466         return -1;
467     }
468     return 0;
469 }
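/*
 * Editorial summary of the encoding above: ibm,dynamic-memory-v2 is a
 * uint32 entry count followed by packed sPAPRDrconfCellV2 cells, each
 * describing a run of LMBs (count, base address, DRC index,
 * associativity index, flags).
 */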
470 
471 static int spapr_dt_dynamic_memory(SpaprMachineState *spapr, void *fdt,
472                                    int offset, MemoryDeviceInfoList *dimms)
473 {
474     MachineState *machine = MACHINE(spapr);
475     int i, ret;
476     uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE;
477     uint32_t device_lmb_start = machine->device_memory->base / lmb_size;
478     uint32_t nr_lmbs = (machine->device_memory->base +
479                        memory_region_size(&machine->device_memory->mr)) /
480                        lmb_size;
481     uint32_t *int_buf, *cur_index, buf_len;
482 
483     /*
484      * Allocate a buffer large enough to hold the ibm,dynamic-memory property
485      */
486     buf_len = (nr_lmbs * SPAPR_DR_LMB_LIST_ENTRY_SIZE + 1) * sizeof(uint32_t);
487     cur_index = int_buf = g_malloc0(buf_len);
488     int_buf[0] = cpu_to_be32(nr_lmbs);
489     cur_index++;
490     for (i = 0; i < nr_lmbs; i++) {
491         uint64_t addr = i * lmb_size;
492         uint32_t *dynamic_memory = cur_index;
493 
494         if (i >= device_lmb_start) {
495             SpaprDrc *drc;
496 
497             drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, i);
498             g_assert(drc);
499 
500             dynamic_memory[0] = cpu_to_be32(addr >> 32);
501             dynamic_memory[1] = cpu_to_be32(addr & 0xffffffff);
502             dynamic_memory[2] = cpu_to_be32(spapr_drc_index(drc));
503             dynamic_memory[3] = cpu_to_be32(0); /* reserved */
504             dynamic_memory[4] = cpu_to_be32(spapr_pc_dimm_node(dimms, addr));
505             if (memory_region_present(get_system_memory(), addr)) {
506                 dynamic_memory[5] = cpu_to_be32(SPAPR_LMB_FLAGS_ASSIGNED);
507             } else {
508                 dynamic_memory[5] = cpu_to_be32(0);
509             }
510         } else {
511             /*
512              * LMB information for the RMA, boot-time RAM and the gap between
513              * RAM and the device memory region -- all of these are marked as
514              * reserved and as having no valid DRC.
515              */
516             dynamic_memory[0] = cpu_to_be32(addr >> 32);
517             dynamic_memory[1] = cpu_to_be32(addr & 0xffffffff);
518             dynamic_memory[2] = cpu_to_be32(0);
519             dynamic_memory[3] = cpu_to_be32(0); /* reserved */
520             dynamic_memory[4] = cpu_to_be32(-1);
521             dynamic_memory[5] = cpu_to_be32(SPAPR_LMB_FLAGS_RESERVED |
522                                             SPAPR_LMB_FLAGS_DRC_INVALID);
523         }
524 
525         cur_index += SPAPR_DR_LMB_LIST_ENTRY_SIZE;
526     }
527     ret = fdt_setprop(fdt, offset, "ibm,dynamic-memory", int_buf, buf_len);
528     g_free(int_buf);
529     if (ret < 0) {
530         return -1;
531     }
532     return 0;
533 }
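/*
 * Editorial note: the v1 ibm,dynamic-memory property above holds one
 * SPAPR_DR_LMB_LIST_ENTRY_SIZE-word entry per LMB, so it grows linearly
 * with maxmem; that is why the run-length-encoded v2 format above it is
 * preferred when the guest negotiates OV5_DRMEM_V2.
 */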
534 
535 /*
536  * Adds ibm,dynamic-reconfiguration-memory node.
537  * Refer to docs/specs/ppc-spapr-hotplug.txt for the documentation
538  * of this device tree node.
539  */
540 static int spapr_dt_dynamic_reconfiguration_memory(SpaprMachineState *spapr,
541                                                    void *fdt)
542 {
543     MachineState *machine = MACHINE(spapr);
544     int ret, offset;
545     uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE;
546     uint32_t prop_lmb_size[] = {cpu_to_be32(lmb_size >> 32),
547                                 cpu_to_be32(lmb_size & 0xffffffff)};
548     MemoryDeviceInfoList *dimms = NULL;
549 
550     /*
551      * Don't create the node if there is no device memory
552      */
553     if (machine->ram_size == machine->maxram_size) {
554         return 0;
555     }
556 
557     offset = fdt_add_subnode(fdt, 0, "ibm,dynamic-reconfiguration-memory");
558 
559     ret = fdt_setprop(fdt, offset, "ibm,lmb-size", prop_lmb_size,
560                     sizeof(prop_lmb_size));
561     if (ret < 0) {
562         return ret;
563     }
564 
565     ret = fdt_setprop_cell(fdt, offset, "ibm,memory-flags-mask", 0xff);
566     if (ret < 0) {
567         return ret;
568     }
569 
570     ret = fdt_setprop_cell(fdt, offset, "ibm,memory-preservation-time", 0x0);
571     if (ret < 0) {
572         return ret;
573     }
574 
575     /* ibm,dynamic-memory or ibm,dynamic-memory-v2 */
576     dimms = qmp_memory_device_list();
577     if (spapr_ovec_test(spapr->ov5_cas, OV5_DRMEM_V2)) {
578         ret = spapr_dt_dynamic_memory_v2(spapr, fdt, offset, dimms);
579     } else {
580         ret = spapr_dt_dynamic_memory(spapr, fdt, offset, dimms);
581     }
582     qapi_free_MemoryDeviceInfoList(dimms);
583 
584     if (ret < 0) {
585         return ret;
586     }
587 
588     ret = spapr_numa_write_assoc_lookup_arrays(spapr, fdt, offset);
589 
590     return ret;
591 }
592 
593 static int spapr_dt_memory(SpaprMachineState *spapr, void *fdt)
594 {
595     MachineState *machine = MACHINE(spapr);
596     SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
597     hwaddr mem_start, node_size;
598     int i, nb_nodes = machine->numa_state->num_nodes;
599     NodeInfo *nodes = machine->numa_state->nodes;
600 
601     for (i = 0, mem_start = 0; i < nb_nodes; ++i) {
602         if (!nodes[i].node_mem) {
603             continue;
604         }
605         if (mem_start >= machine->ram_size) {
606             node_size = 0;
607         } else {
608             node_size = nodes[i].node_mem;
609             if (node_size > machine->ram_size - mem_start) {
610                 node_size = machine->ram_size - mem_start;
611             }
612         }
613         if (!mem_start) {
614             /* spapr_machine_init() checks for rma_size <= node0_size
615              * already */
616             spapr_dt_memory_node(spapr, fdt, i, 0, spapr->rma_size);
617             mem_start += spapr->rma_size;
618             node_size -= spapr->rma_size;
619         }
620         for ( ; node_size; ) {
621             hwaddr sizetmp = pow2floor(node_size);
622 
623             /* mem_start != 0 here */
624             if (ctzl(mem_start) < ctzl(sizetmp)) {
625                 sizetmp = 1ULL << ctzl(mem_start);
626             }
627 
628             spapr_dt_memory_node(spapr, fdt, i, mem_start, sizetmp);
629             node_size -= sizetmp;
630             mem_start += sizetmp;
631         }
632     }
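    /*
     * Illustrative example of the carving loop above (not from the
     * original source): 3 GiB of node memory starting at 1 GiB is
     * emitted as a 1 GiB node at 1 GiB (limited by ctzl(mem_start))
     * followed by a 2 GiB node at 2 GiB, keeping every memory node
     * naturally aligned and power-of-2 sized.
     */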
633 
634     /* Generate ibm,dynamic-reconfiguration-memory node if required */
635     if (spapr_ovec_test(spapr->ov5_cas, OV5_DRCONF_MEMORY)) {
636         int ret;
637 
638         g_assert(smc->dr_lmb_enabled);
639         ret = spapr_dt_dynamic_reconfiguration_memory(spapr, fdt);
640         if (ret) {
641             return ret;
642         }
643     }
644 
645     return 0;
646 }
647 
648 static void spapr_dt_cpu(CPUState *cs, void *fdt, int offset,
649                          SpaprMachineState *spapr)
650 {
651     MachineState *ms = MACHINE(spapr);
652     PowerPCCPU *cpu = POWERPC_CPU(cs);
653     CPUPPCState *env = &cpu->env;
654     PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs);
655     int index = spapr_get_vcpu_id(cpu);
656     uint32_t segs[] = {cpu_to_be32(28), cpu_to_be32(40),
657                        0xffffffff, 0xffffffff};
658     uint32_t tbfreq = kvm_enabled() ? kvmppc_get_tbfreq()
659         : SPAPR_TIMEBASE_FREQ;
660     uint32_t cpufreq = kvm_enabled() ? kvmppc_get_clockfreq() : 1000000000;
661     uint32_t page_sizes_prop[64];
662     size_t page_sizes_prop_size;
663     unsigned int smp_threads = ms->smp.threads;
664     uint32_t vcpus_per_socket = smp_threads * ms->smp.cores;
665     uint32_t pft_size_prop[] = {0, cpu_to_be32(spapr->htab_shift)};
666     int compat_smt = MIN(smp_threads, ppc_compat_max_vthreads(cpu));
667     SpaprDrc *drc;
668     int drc_index;
669     uint32_t radix_AP_encodings[PPC_PAGE_SIZES_MAX_SZ];
670     int i;
671 
672     drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU, index);
673     if (drc) {
674         drc_index = spapr_drc_index(drc);
675         _FDT((fdt_setprop_cell(fdt, offset, "ibm,my-drc-index", drc_index)));
676     }
677 
678     _FDT((fdt_setprop_cell(fdt, offset, "reg", index)));
679     _FDT((fdt_setprop_string(fdt, offset, "device_type", "cpu")));
680 
681     _FDT((fdt_setprop_cell(fdt, offset, "cpu-version", env->spr[SPR_PVR])));
682     _FDT((fdt_setprop_cell(fdt, offset, "d-cache-block-size",
683                            env->dcache_line_size)));
684     _FDT((fdt_setprop_cell(fdt, offset, "d-cache-line-size",
685                            env->dcache_line_size)));
686     _FDT((fdt_setprop_cell(fdt, offset, "i-cache-block-size",
687                            env->icache_line_size)));
688     _FDT((fdt_setprop_cell(fdt, offset, "i-cache-line-size",
689                            env->icache_line_size)));
690 
691     if (pcc->l1_dcache_size) {
692         _FDT((fdt_setprop_cell(fdt, offset, "d-cache-size",
693                                pcc->l1_dcache_size)));
694     } else {
695         warn_report("Unknown L1 dcache size for cpu");
696     }
697     if (pcc->l1_icache_size) {
698         _FDT((fdt_setprop_cell(fdt, offset, "i-cache-size",
699                                pcc->l1_icache_size)));
700     } else {
701         warn_report("Unknown L1 icache size for cpu");
702     }
703 
704     _FDT((fdt_setprop_cell(fdt, offset, "timebase-frequency", tbfreq)));
705     _FDT((fdt_setprop_cell(fdt, offset, "clock-frequency", cpufreq)));
706     _FDT((fdt_setprop_cell(fdt, offset, "slb-size", cpu->hash64_opts->slb_size)));
707     _FDT((fdt_setprop_cell(fdt, offset, "ibm,slb-size", cpu->hash64_opts->slb_size)));
708     _FDT((fdt_setprop_string(fdt, offset, "status", "okay")));
709     _FDT((fdt_setprop(fdt, offset, "64-bit", NULL, 0)));
710 
711     if (ppc_has_spr(cpu, SPR_PURR)) {
712         _FDT((fdt_setprop_cell(fdt, offset, "ibm,purr", 1)));
713     }
714     if (ppc_has_spr(cpu, SPR_SPURR)) {
715         _FDT((fdt_setprop_cell(fdt, offset, "ibm,spurr", 1)));
716     }
717 
718     if (ppc_hash64_has(cpu, PPC_HASH64_1TSEG)) {
719         _FDT((fdt_setprop(fdt, offset, "ibm,processor-segment-sizes",
720                           segs, sizeof(segs))));
721     }
722 
723     /* Advertise VSX (vector extensions) if available
724      *   1               == VMX / Altivec available
725      *   2               == VSX available
726      *
727      * Only CPUs for which we create core types in spapr_cpu_core.c
728      * are possible, and all of those have VMX */
729     if (env->insns_flags & PPC_ALTIVEC) {
730         if (spapr_get_cap(spapr, SPAPR_CAP_VSX) != 0) {
731             _FDT((fdt_setprop_cell(fdt, offset, "ibm,vmx", 2)));
732         } else {
733             _FDT((fdt_setprop_cell(fdt, offset, "ibm,vmx", 1)));
734         }
735     }
736 
737     /* Advertise DFP (Decimal Floating Point) if available
738      *   0 / no property == no DFP
739      *   1               == DFP available */
740     if (spapr_get_cap(spapr, SPAPR_CAP_DFP) != 0) {
741         _FDT((fdt_setprop_cell(fdt, offset, "ibm,dfp", 1)));
742     }
743 
744     page_sizes_prop_size = ppc_create_page_sizes_prop(cpu, page_sizes_prop,
745                                                       sizeof(page_sizes_prop));
746     if (page_sizes_prop_size) {
747         _FDT((fdt_setprop(fdt, offset, "ibm,segment-page-sizes",
748                           page_sizes_prop, page_sizes_prop_size)));
749     }
750 
751     spapr_dt_pa_features(spapr, cpu, fdt, offset);
752 
753     _FDT((fdt_setprop_cell(fdt, offset, "ibm,chip-id",
754                            cs->cpu_index / vcpus_per_socket)));
755 
756     _FDT((fdt_setprop(fdt, offset, "ibm,pft-size",
757                       pft_size_prop, sizeof(pft_size_prop))));
758 
759     if (ms->numa_state->num_nodes > 1) {
760         _FDT(spapr_numa_fixup_cpu_dt(spapr, fdt, offset, cpu));
761     }
762 
763     _FDT(spapr_fixup_cpu_smt_dt(fdt, offset, cpu, compat_smt));
764 
765     if (pcc->radix_page_info) {
766         for (i = 0; i < pcc->radix_page_info->count; i++) {
767             radix_AP_encodings[i] =
768                 cpu_to_be32(pcc->radix_page_info->entries[i]);
769         }
770         _FDT((fdt_setprop(fdt, offset, "ibm,processor-radix-AP-encodings",
771                           radix_AP_encodings,
772                           pcc->radix_page_info->count *
773                           sizeof(radix_AP_encodings[0]))));
774     }
775 
776     /*
777      * We set this property to let the guest know that it can use the large
778      * decrementer and its width in bits.
779      */
780     if (spapr_get_cap(spapr, SPAPR_CAP_LARGE_DECREMENTER) != SPAPR_CAP_OFF) {
781         _FDT((fdt_setprop_u32(fdt, offset, "ibm,dec-bits",
782                               pcc->lrg_decr_bits)));
    }
783 }
784 
785 static void spapr_dt_cpus(void *fdt, SpaprMachineState *spapr)
786 {
787     CPUState **rev;
788     CPUState *cs;
789     int n_cpus;
790     int cpus_offset;
791     int i;
792 
793     cpus_offset = fdt_add_subnode(fdt, 0, "cpus");
794     _FDT(cpus_offset);
795     _FDT((fdt_setprop_cell(fdt, cpus_offset, "#address-cells", 0x1)));
796     _FDT((fdt_setprop_cell(fdt, cpus_offset, "#size-cells", 0x0)));
797 
798     /*
799      * We walk the CPUs in reverse order to ensure that CPU DT nodes
800      * created by fdt_add_subnode() end up in the right order in FDT
801      * so that the guest kernel enumerates the CPUs correctly.
802      *
803      * The CPU list cannot be traversed in reverse order, so we need
804      * to do extra work.
805      */
806     n_cpus = 0;
807     rev = NULL;
808     CPU_FOREACH(cs) {
809         rev = g_renew(CPUState *, rev, n_cpus + 1);
810         rev[n_cpus++] = cs;
811     }
812 
813     for (i = n_cpus - 1; i >= 0; i--) {
814         CPUState *cs = rev[i];
815         PowerPCCPU *cpu = POWERPC_CPU(cs);
816         int index = spapr_get_vcpu_id(cpu);
817         DeviceClass *dc = DEVICE_GET_CLASS(cs);
818         g_autofree char *nodename = NULL;
819         int offset;
820 
821         if (!spapr_is_thread0_in_vcore(spapr, cpu)) {
822             continue;
823         }
824 
825         nodename = g_strdup_printf("%s@%x", dc->fw_name, index);
826         offset = fdt_add_subnode(fdt, cpus_offset, nodename);
827         _FDT(offset);
828         spapr_dt_cpu(cs, fdt, offset, spapr);
829     }
830 
831     g_free(rev);
832 }
833 
834 static int spapr_dt_rng(void *fdt)
835 {
836     int node;
837     int ret;
838 
839     node = qemu_fdt_add_subnode(fdt, "/ibm,platform-facilities");
840     if (node <= 0) {
841         return -1;
842     }
843     ret = fdt_setprop_string(fdt, node, "device_type",
844                              "ibm,platform-facilities");
845     ret |= fdt_setprop_cell(fdt, node, "#address-cells", 0x1);
846     ret |= fdt_setprop_cell(fdt, node, "#size-cells", 0x0);
847 
848     node = fdt_add_subnode(fdt, node, "ibm,random-v1");
849     if (node <= 0) {
850         return -1;
851     }
852     ret |= fdt_setprop_string(fdt, node, "compatible", "ibm,random");
853 
854     return ret ? -1 : 0;
855 }
856 
857 static void spapr_dt_rtas(SpaprMachineState *spapr, void *fdt)
858 {
859     MachineState *ms = MACHINE(spapr);
860     int rtas;
861     GString *hypertas = g_string_sized_new(256);
862     GString *qemu_hypertas = g_string_sized_new(256);
863     uint64_t max_device_addr = MACHINE(spapr)->device_memory->base +
864         memory_region_size(&MACHINE(spapr)->device_memory->mr);
865     uint32_t lrdr_capacity[] = {
866         cpu_to_be32(max_device_addr >> 32),
867         cpu_to_be32(max_device_addr & 0xffffffff),
868         cpu_to_be32(SPAPR_MEMORY_BLOCK_SIZE >> 32),
869         cpu_to_be32(SPAPR_MEMORY_BLOCK_SIZE & 0xffffffff),
870         cpu_to_be32(ms->smp.max_cpus / ms->smp.threads),
871     };
872 
873     _FDT(rtas = fdt_add_subnode(fdt, 0, "rtas"));
874 
875     /* hypertas */
876     add_str(hypertas, "hcall-pft");
877     add_str(hypertas, "hcall-term");
878     add_str(hypertas, "hcall-dabr");
879     add_str(hypertas, "hcall-interrupt");
880     add_str(hypertas, "hcall-tce");
881     add_str(hypertas, "hcall-vio");
882     add_str(hypertas, "hcall-splpar");
883     add_str(hypertas, "hcall-join");
884     add_str(hypertas, "hcall-bulk");
885     add_str(hypertas, "hcall-set-mode");
886     add_str(hypertas, "hcall-sprg0");
887     add_str(hypertas, "hcall-copy");
888     add_str(hypertas, "hcall-debug");
889     add_str(hypertas, "hcall-vphn");
890     if (spapr_get_cap(spapr, SPAPR_CAP_RPT_INVALIDATE) == SPAPR_CAP_ON) {
891         add_str(hypertas, "hcall-rpt-invalidate");
892     }
893 
894     add_str(qemu_hypertas, "hcall-memop1");
895 
896     if (!kvm_enabled() || kvmppc_spapr_use_multitce()) {
897         add_str(hypertas, "hcall-multi-tce");
898     }
899 
900     if (spapr->resize_hpt != SPAPR_RESIZE_HPT_DISABLED) {
901         add_str(hypertas, "hcall-hpt-resize");
902     }
903 
904     add_str(hypertas, "hcall-watchdog");
905 
906     _FDT(fdt_setprop(fdt, rtas, "ibm,hypertas-functions",
907                      hypertas->str, hypertas->len));
908     g_string_free(hypertas, TRUE);
909     _FDT(fdt_setprop(fdt, rtas, "qemu,hypertas-functions",
910                      qemu_hypertas->str, qemu_hypertas->len));
911     g_string_free(qemu_hypertas, TRUE);
912 
913     spapr_numa_write_rtas_dt(spapr, fdt, rtas);
914 
915     /*
916      * FWNMI reserves RTAS_ERROR_LOG_MAX for the machine check error log,
917      * and 16 bytes per CPU for system reset error log plus an extra 8 bytes.
918      *
919      * The system reset requirements are driven by existing Linux and PowerVM
920      * implementation which (contrary to PAPR) saves r3 in the error log
921      * structure like machine check, so Linux expects to find the saved r3
922      * value at the address in r3 upon FWNMI-enabled sreset interrupt (and
923      * does not look at the error value).
924      *
925      * System reset interrupts are not subject to interlock like machine
926      * check, so this memory area could be corrupted if the sreset is
927      * interrupted by a machine check (or vice versa) if it was shared. To
928      * prevent this, system reset uses per-CPU areas for the sreset save
929      * area. A system reset that interrupts a system reset handler could
930      * still overwrite this area, but Linux doesn't try to recover in that
931      * case anyway.
932      *
933      * The extra 8 bytes is required because Linux's FWNMI error log check
934      * is off-by-one.
935      *
936      * RTAS_MIN_SIZE is required for the RTAS blob itself.
937      */
938     _FDT(fdt_setprop_cell(fdt, rtas, "rtas-size", RTAS_MIN_SIZE +
939                           RTAS_ERROR_LOG_MAX +
940                           ms->smp.max_cpus * sizeof(uint64_t) * 2 +
941                           sizeof(uint64_t)));
942     _FDT(fdt_setprop_cell(fdt, rtas, "rtas-error-log-max",
943                           RTAS_ERROR_LOG_MAX));
944     _FDT(fdt_setprop_cell(fdt, rtas, "rtas-event-scan-rate",
945                           RTAS_EVENT_SCAN_RATE));
946 
947     g_assert(msi_nonbroken);
948     _FDT(fdt_setprop(fdt, rtas, "ibm,change-msix-capable", NULL, 0));
949 
950     /*
951      * According to PAPR, rtas ibm,os-term does not guarantee a return
952      * back to the guest cpu.
953      *
954      * The additional ibm,extended-os-term property, however, indicates
955      * that the RTAS call will always return. Set this property.
956      */
957     _FDT(fdt_setprop(fdt, rtas, "ibm,extended-os-term", NULL, 0));
958 
959     _FDT(fdt_setprop(fdt, rtas, "ibm,lrdr-capacity",
960                      lrdr_capacity, sizeof(lrdr_capacity)));
961 
962     spapr_dt_rtas_tokens(fdt, rtas);
963 }
964 
965 /*
966  * Prepare ibm,arch-vec-5-platform-support, which indicates the MMU
967  * and the XIVE features that the guest may request and thus the valid
968  * values for bytes 23..26 of option vector 5:
969  */
970 static void spapr_dt_ov5_platform_support(SpaprMachineState *spapr, void *fdt,
971                                           int chosen)
972 {
973     PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu);
974 
975     char val[2 * 4] = {
976         23, 0x00, /* XICS / XIVE mode */
977         24, 0x00, /* Hash/Radix, filled in below. */
978         25, 0x00, /* Hash options: Segment Tables == no, GTSE == no. */
979         26, 0x40, /* Radix options: GTSE == yes. */
980     };
981 
982     if (spapr->irq->xics && spapr->irq->xive) {
983         val[1] = SPAPR_OV5_XIVE_BOTH;
984     } else if (spapr->irq->xive) {
985         val[1] = SPAPR_OV5_XIVE_EXPLOIT;
986     } else {
987         assert(spapr->irq->xics);
988         val[1] = SPAPR_OV5_XIVE_LEGACY;
989     }
990 
991     if (!ppc_check_compat(first_ppc_cpu, CPU_POWERPC_LOGICAL_3_00, 0,
992                           first_ppc_cpu->compat_pvr)) {
993         /*
994          * If we're in a pre POWER9 compat mode then the guest should
995          * do hash and use the legacy interrupt mode
996          */
997         val[1] = SPAPR_OV5_XIVE_LEGACY; /* XICS */
998         val[3] = 0x00; /* Hash */
999         spapr_check_mmu_mode(false);
1000     } else if (kvm_enabled()) {
1001         if (kvmppc_has_cap_mmu_radix() && kvmppc_has_cap_mmu_hash_v3()) {
1002             val[3] = 0x80; /* OV5_MMU_BOTH */
1003         } else if (kvmppc_has_cap_mmu_radix()) {
1004             val[3] = 0x40; /* OV5_MMU_RADIX_300 */
1005         } else {
1006             val[3] = 0x00; /* Hash */
1007         }
1008     } else {
1009         /* The V3 MMU supports both hash and radix in TCG (with dynamic switching) */
1010         val[3] = 0xC0;
1011     }
1012     _FDT(fdt_setprop(fdt, chosen, "ibm,arch-vec-5-platform-support",
1013                      val, sizeof(val)));
1014 }
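/*
 * Editorial note: each pair in val[] above is (option vector 5 byte
 * number, value); per the inline labels, byte 24 = 0x80 advertises both
 * hash and radix MMU support, 0x40 radix only and 0x00 hash only.
 */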
1015 
1016 static void spapr_dt_chosen(SpaprMachineState *spapr, void *fdt, bool reset)
1017 {
1018     MachineState *machine = MACHINE(spapr);
1019     SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);
1020     uint8_t rng_seed[32];
1021     int chosen;
1022 
1023     _FDT(chosen = fdt_add_subnode(fdt, 0, "chosen"));
1024 
1025     if (reset) {
1026         const char *boot_device = spapr->boot_device;
1027         g_autofree char *stdout_path = spapr_vio_stdout_path(spapr->vio_bus);
1028         size_t cb = 0;
1029         g_autofree char *bootlist = get_boot_devices_list(&cb);
1030 
1031         if (machine->kernel_cmdline && machine->kernel_cmdline[0]) {
1032             _FDT(fdt_setprop_string(fdt, chosen, "bootargs",
1033                                     machine->kernel_cmdline));
1034         }
1035 
1036         if (spapr->initrd_size) {
1037             _FDT(fdt_setprop_cell(fdt, chosen, "linux,initrd-start",
1038                                   spapr->initrd_base));
1039             _FDT(fdt_setprop_cell(fdt, chosen, "linux,initrd-end",
1040                                   spapr->initrd_base + spapr->initrd_size));
1041         }
1042 
1043         if (spapr->kernel_size) {
1044             uint64_t kprop[2] = { cpu_to_be64(spapr->kernel_addr),
1045                                   cpu_to_be64(spapr->kernel_size) };
1046 
1047             _FDT(fdt_setprop(fdt, chosen, "qemu,boot-kernel",
1048                          &kprop, sizeof(kprop)));
1049             if (spapr->kernel_le) {
1050                 _FDT(fdt_setprop(fdt, chosen, "qemu,boot-kernel-le", NULL, 0));
1051             }
1052         }
1053         if (machine->boot_config.has_menu && machine->boot_config.menu) {
1054             _FDT((fdt_setprop_cell(fdt, chosen, "qemu,boot-menu", true)));
1055         }
1056         _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-width", graphic_width));
1057         _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-height", graphic_height));
1058         _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-depth", graphic_depth));
1059 
1060         if (cb && bootlist) {
1061             int i;
1062 
1063             for (i = 0; i < cb; i++) {
1064                 if (bootlist[i] == '\n') {
1065                     bootlist[i] = ' ';
1066                 }
1067             }
1068             _FDT(fdt_setprop_string(fdt, chosen, "qemu,boot-list", bootlist));
1069         }
1070 
1071         if (boot_device && strlen(boot_device)) {
1072             _FDT(fdt_setprop_string(fdt, chosen, "qemu,boot-device", boot_device));
1073         }
1074 
1075         if (spapr->want_stdout_path && stdout_path) {
1076             /*
1077              * The "linux,stdout-path" and "stdout" properties are
1078              * deprecated by the Linux kernel. New platforms should only
1079              * use the "stdout-path" property. Set the new property
1080              * and continue using the older one to remain compatible
1081              * with existing firmware.
1082              */
1083             _FDT(fdt_setprop_string(fdt, chosen, "linux,stdout-path", stdout_path));
1084             _FDT(fdt_setprop_string(fdt, chosen, "stdout-path", stdout_path));
1085         }
1086 
1087         /*
1088          * We can deal with BAR reallocation just fine, advertise it
1089          * to the guest
1090          */
1091         if (smc->linux_pci_probe) {
1092             _FDT(fdt_setprop_cell(fdt, chosen, "linux,pci-probe-only", 0));
1093         }
1094 
1095         spapr_dt_ov5_platform_support(spapr, fdt, chosen);
1096     }
1097 
1098     qemu_guest_getrandom_nofail(rng_seed, sizeof(rng_seed));
1099     _FDT(fdt_setprop(fdt, chosen, "rng-seed", rng_seed, sizeof(rng_seed)));
1100 
1101     _FDT(spapr_dt_ovec(fdt, chosen, spapr->ov5_cas, "ibm,architecture-vec-5"));
1102 }
1103 
1104 static void spapr_dt_hypervisor(SpaprMachineState *spapr, void *fdt)
1105 {
1106     /* The /hypervisor node isn't in PAPR - this is a hack to allow PR
1107      * KVM to work under pHyp with some guest co-operation */
1108     int hypervisor;
1109     uint8_t hypercall[16];
1110 
1111     _FDT(hypervisor = fdt_add_subnode(fdt, 0, "hypervisor"));
1112     /* indicate KVM hypercall interface */
1113     _FDT(fdt_setprop_string(fdt, hypervisor, "compatible", "linux,kvm"));
1114     if (kvmppc_has_cap_fixup_hcalls()) {
1115         /*
1116          * Older KVM versions with older guest kernels were broken
1117          * with the magic page, don't allow the guest to map it.
1118          */
1119         if (!kvmppc_get_hypercall(first_cpu->env_ptr, hypercall,
1120                                   sizeof(hypercall))) {
1121             _FDT(fdt_setprop(fdt, hypervisor, "hcall-instructions",
1122                              hypercall, sizeof(hypercall)));
1123         }
1124     }
1125 }
1126 
1127 void *spapr_build_fdt(SpaprMachineState *spapr, bool reset, size_t space)
1128 {
1129     MachineState *machine = MACHINE(spapr);
1130     MachineClass *mc = MACHINE_GET_CLASS(machine);
1131     SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);
1132     uint32_t root_drc_type_mask = 0;
1133     int ret;
1134     void *fdt;
1135     SpaprPhbState *phb;
1136     char *buf;
1137 
1138     fdt = g_malloc0(space);
1139     _FDT((fdt_create_empty_tree(fdt, space)));
1140 
1141     /* Root node */
1142     _FDT(fdt_setprop_string(fdt, 0, "device_type", "chrp"));
1143     _FDT(fdt_setprop_string(fdt, 0, "model", "IBM pSeries (emulated by qemu)"));
1144     _FDT(fdt_setprop_string(fdt, 0, "compatible", "qemu,pseries"));
1145 
1146     /* Guest UUID & Name */
1147     buf = qemu_uuid_unparse_strdup(&qemu_uuid);
1148     _FDT(fdt_setprop_string(fdt, 0, "vm,uuid", buf));
1149     if (qemu_uuid_set) {
1150         _FDT(fdt_setprop_string(fdt, 0, "system-id", buf));
1151     }
1152     g_free(buf);
1153 
1154     if (qemu_get_vm_name()) {
1155         _FDT(fdt_setprop_string(fdt, 0, "ibm,partition-name",
1156                                 qemu_get_vm_name()));
1157     }
1158 
1159     /* Host Model & Serial Number */
1160     if (spapr->host_model) {
1161         _FDT(fdt_setprop_string(fdt, 0, "host-model", spapr->host_model));
1162     } else if (smc->broken_host_serial_model && kvmppc_get_host_model(&buf)) {
1163         _FDT(fdt_setprop_string(fdt, 0, "host-model", buf));
1164         g_free(buf);
1165     }
1166 
1167     if (spapr->host_serial) {
1168         _FDT(fdt_setprop_string(fdt, 0, "host-serial", spapr->host_serial));
1169     } else if (smc->broken_host_serial_model && kvmppc_get_host_serial(&buf)) {
1170         _FDT(fdt_setprop_string(fdt, 0, "host-serial", buf));
1171         g_free(buf);
1172     }
1173 
1174     _FDT(fdt_setprop_cell(fdt, 0, "#address-cells", 2));
1175     _FDT(fdt_setprop_cell(fdt, 0, "#size-cells", 2));
1176 
1177     /* /interrupt controller */
1178     spapr_irq_dt(spapr, spapr_max_server_number(spapr), fdt, PHANDLE_INTC);
1179 
1180     ret = spapr_dt_memory(spapr, fdt);
1181     if (ret < 0) {
1182         error_report("couldn't set up memory nodes in fdt");
1183         exit(1);
1184     }
1185 
1186     /* /vdevice */
1187     spapr_dt_vdevice(spapr->vio_bus, fdt);
1188 
1189     if (object_resolve_path_type("", TYPE_SPAPR_RNG, NULL)) {
1190         ret = spapr_dt_rng(fdt);
1191         if (ret < 0) {
1192             error_report("could not set up rng device in the fdt");
1193             exit(1);
1194         }
1195     }
1196 
1197     QLIST_FOREACH(phb, &spapr->phbs, list) {
1198         ret = spapr_dt_phb(spapr, phb, PHANDLE_INTC, fdt, NULL);
1199         if (ret < 0) {
1200             error_report("couldn't set up PCI devices in fdt");
1201             exit(1);
1202         }
1203     }
1204 
1205     spapr_dt_cpus(fdt, spapr);
1206 
1207     /* ibm,drc-indexes and friends */
1208     if (smc->dr_lmb_enabled) {
1209         root_drc_type_mask |= SPAPR_DR_CONNECTOR_TYPE_LMB;
1210     }
1211     if (smc->dr_phb_enabled) {
1212         root_drc_type_mask |= SPAPR_DR_CONNECTOR_TYPE_PHB;
1213     }
1214     if (mc->nvdimm_supported) {
1215         root_drc_type_mask |= SPAPR_DR_CONNECTOR_TYPE_PMEM;
1216     }
1217     if (root_drc_type_mask) {
1218         _FDT(spapr_dt_drc(fdt, 0, NULL, root_drc_type_mask));
1219     }
1220 
1221     if (mc->has_hotpluggable_cpus) {
1222         int offset = fdt_path_offset(fdt, "/cpus");
1223         ret = spapr_dt_drc(fdt, offset, NULL, SPAPR_DR_CONNECTOR_TYPE_CPU);
1224         if (ret < 0) {
1225             error_report("Couldn't set up CPU DR device tree properties");
1226             exit(1);
1227         }
1228     }
1229 
1230     /* /event-sources */
1231     spapr_dt_events(spapr, fdt);
1232 
1233     /* /rtas */
1234     spapr_dt_rtas(spapr, fdt);
1235 
1236     /* /chosen */
1237     spapr_dt_chosen(spapr, fdt, reset);
1238 
1239     /* /hypervisor */
1240     if (kvm_enabled()) {
1241         spapr_dt_hypervisor(spapr, fdt);
1242     }
1243 
1244     /* Build memory reserve map */
1245     if (reset) {
1246         if (spapr->kernel_size) {
1247             _FDT((fdt_add_mem_rsv(fdt, spapr->kernel_addr,
1248                                   spapr->kernel_size)));
1249         }
1250         if (spapr->initrd_size) {
1251             _FDT((fdt_add_mem_rsv(fdt, spapr->initrd_base,
1252                                   spapr->initrd_size)));
1253         }
1254     }
1255 
1256     /* NVDIMM devices */
1257     if (mc->nvdimm_supported) {
1258         spapr_dt_persistent_memory(spapr, fdt);
1259     }
1260 
1261     return fdt;
1262 }
1263 
1264 static uint64_t translate_kernel_address(void *opaque, uint64_t addr)
1265 {
1266     SpaprMachineState *spapr = opaque;
1267 
1268     return (addr & 0x0fffffff) + spapr->kernel_addr;
1269 }
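/*
 * Editorial note: the 0x0fffffff mask keeps only the low 28 bits of the
 * kernel's link-time address, so the loaded image is rebased to
 * spapr->kernel_addr plus its offset within a 256 MiB window.
 */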
1270 
1271 static void emulate_spapr_hypercall(PPCVirtualHypervisor *vhyp,
1272                                     PowerPCCPU *cpu)
1273 {
1274     CPUPPCState *env = &cpu->env;
1275 
1276     /* The TCG path should also be holding the BQL at this point */
1277     g_assert(qemu_mutex_iothread_locked());
1278 
1279     g_assert(!vhyp_cpu_in_nested(cpu));
1280 
1281     if (FIELD_EX64(env->msr, MSR, PR)) {
1282         hcall_dprintf("Hypercall made with MSR[PR]=1\n");
1283         env->gpr[3] = H_PRIVILEGE;
1284     } else {
1285         env->gpr[3] = spapr_hypercall(cpu, env->gpr[3], &env->gpr[4]);
1286     }
1287 }
1288 
1289 struct LPCRSyncState {
1290     target_ulong value;
1291     target_ulong mask;
1292 };
1293 
1294 static void do_lpcr_sync(CPUState *cs, run_on_cpu_data arg)
1295 {
1296     struct LPCRSyncState *s = arg.host_ptr;
1297     PowerPCCPU *cpu = POWERPC_CPU(cs);
1298     CPUPPCState *env = &cpu->env;
1299     target_ulong lpcr;
1300 
1301     cpu_synchronize_state(cs);
1302     lpcr = env->spr[SPR_LPCR];
1303     lpcr &= ~s->mask;
1304     lpcr |= s->value;
1305     ppc_store_lpcr(cpu, lpcr);
1306 }
1307 
1308 void spapr_set_all_lpcrs(target_ulong value, target_ulong mask)
1309 {
1310     CPUState *cs;
1311     struct LPCRSyncState s = {
1312         .value = value,
1313         .mask = mask
1314     };
1315     CPU_FOREACH(cs) {
1316         run_on_cpu(cs, do_lpcr_sync, RUN_ON_CPU_HOST_PTR(&s));
1317     }
1318 }
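/*
 * Editorial note: run_on_cpu() executes do_lpcr_sync() in each vCPU's
 * own context, so every LPCR is updated with a read-modify-write under
 * (value, mask) without racing the vCPU threads.
 */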
1319 
1320 static bool spapr_get_pate(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu,
1321                            target_ulong lpid, ppc_v3_pate_t *entry)
1322 {
1323     SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);
1324     SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
1325 
1326     if (!spapr_cpu->in_nested) {
1327         assert(lpid == 0);
1328 
1329         /* Copy PATE1:GR into PATE0:HR */
1330         entry->dw0 = spapr->patb_entry & PATE0_HR;
1331         entry->dw1 = spapr->patb_entry;
1332 
1333     } else {
1334         uint64_t patb, pats;
1335 
1336         assert(lpid != 0);
1337 
1338         patb = spapr->nested_ptcr & PTCR_PATB;
1339         pats = spapr->nested_ptcr & PTCR_PATS;
1340 
1341         /* Check if partition table is properly aligned */
1342         if (patb & MAKE_64BIT_MASK(0, pats + 12)) {
1343             return false;
1344         }
1345 
1346         /* Calculate number of entries */
1347         pats = 1ull << (pats + 12 - 4);
1348         if (pats <= lpid) {
1349             return false;
1350         }
1351 
1352         /* Grab entry */
1353         patb += 16 * lpid;
1354         entry->dw0 = ldq_phys(CPU(cpu)->as, patb);
1355         entry->dw1 = ldq_phys(CPU(cpu)->as, patb + 8);
1356     }
1357 
1358     return true;
1359 }
1360 
1361 #define HPTE(_table, _i)   (void *)(((uint64_t *)(_table)) + ((_i) * 2))
1362 #define HPTE_VALID(_hpte)  (tswap64(*((uint64_t *)(_hpte))) & HPTE64_V_VALID)
1363 #define HPTE_DIRTY(_hpte)  (tswap64(*((uint64_t *)(_hpte))) & HPTE64_V_HPTE_DIRTY)
1364 #define CLEAN_HPTE(_hpte)  ((*(uint64_t *)(_hpte)) &= tswap64(~HPTE64_V_HPTE_DIRTY))
1365 #define DIRTY_HPTE(_hpte)  ((*(uint64_t *)(_hpte)) |= tswap64(HPTE64_V_HPTE_DIRTY))
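/*
 * Editorial note: each HPTE is HASH_PTE_SIZE_64 (16) bytes, i.e. two
 * 64-bit words, hence the i * 2 stride in HPTE(); the VALID/DIRTY tests
 * inspect the first doubleword after fixing its endianness with
 * tswap64().
 */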
1366 
1367 /*
1368  * Get the fd to access the kernel htab, re-opening it if necessary
1369  */
1370 static int get_htab_fd(SpaprMachineState *spapr)
1371 {
1372     Error *local_err = NULL;
1373 
1374     if (spapr->htab_fd >= 0) {
1375         return spapr->htab_fd;
1376     }
1377 
1378     spapr->htab_fd = kvmppc_get_htab_fd(false, 0, &local_err);
1379     if (spapr->htab_fd < 0) {
1380         error_report_err(local_err);
1381     }
1382 
1383     return spapr->htab_fd;
1384 }
1385 
1386 void close_htab_fd(SpaprMachineState *spapr)
1387 {
1388     if (spapr->htab_fd >= 0) {
1389         close(spapr->htab_fd);
1390     }
1391     spapr->htab_fd = -1;
1392 }
1393 
1394 static hwaddr spapr_hpt_mask(PPCVirtualHypervisor *vhyp)
1395 {
1396     SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);
1397 
1398     return HTAB_SIZE(spapr) / HASH_PTEG_SIZE_64 - 1;
1399 }
1400 
1401 static target_ulong spapr_encode_hpt_for_kvm_pr(PPCVirtualHypervisor *vhyp)
1402 {
1403     SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);
1404 
1405     assert(kvm_enabled());
1406 
1407     if (!spapr->htab) {
1408         return 0;
1409     }
1410 
1411     return (target_ulong)(uintptr_t)spapr->htab | (spapr->htab_shift - 18);
1412 }
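/*
 * Editorial note: this value follows the SDR1 register layout, assuming
 * the HPT is naturally aligned: the high bits carry the table's host
 * address and the low bits carry htab_shift - 18, the size as a power
 * of two above the 256 KiB architected minimum.
 */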
1413 
1414 static const ppc_hash_pte64_t *spapr_map_hptes(PPCVirtualHypervisor *vhyp,
1415                                                 hwaddr ptex, int n)
1416 {
1417     SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);
1418     hwaddr pte_offset = ptex * HASH_PTE_SIZE_64;
1419 
1420     if (!spapr->htab) {
1421         /*
1422          * The HTAB is controlled by KVM. Fetch the PTEs into a temporary buffer.
1423          */
1424         ppc_hash_pte64_t *hptes = g_malloc(n * HASH_PTE_SIZE_64);
1425         kvmppc_read_hptes(hptes, ptex, n);
1426         return hptes;
1427     }
1428 
1429     /*
1430      * HTAB is controlled by QEMU. Just point to the internally
1431      * accessible PTEG.
1432      */
1433     return (const ppc_hash_pte64_t *)(spapr->htab + pte_offset);
1434 }
1435 
1436 static void spapr_unmap_hptes(PPCVirtualHypervisor *vhyp,
1437                               const ppc_hash_pte64_t *hptes,
1438                               hwaddr ptex, int n)
1439 {
1440     SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);
1441 
1442     if (!spapr->htab) {
1443         g_free((void *)hptes);
1444     }
1445 
1446     /* Nothing to do for a QEMU-managed HPT */
1447 }
1448 
1449 void spapr_store_hpte(PowerPCCPU *cpu, hwaddr ptex,
1450                       uint64_t pte0, uint64_t pte1)
1451 {
1452     SpaprMachineState *spapr = SPAPR_MACHINE(cpu->vhyp);
1453     hwaddr offset = ptex * HASH_PTE_SIZE_64;
1454 
1455     if (!spapr->htab) {
1456         kvmppc_write_hpte(ptex, pte0, pte1);
1457     } else {
1458         if (pte0 & HPTE64_V_VALID) {
1459             stq_p(spapr->htab + offset + HPTE64_DW1, pte1);
1460             /*
1461              * When setting valid, we write PTE1 first. This ensures
1462              * proper synchronization with the reading code in
1463              * ppc_hash64_pteg_search()
1464              */
1465             smp_wmb();
1466             stq_p(spapr->htab + offset, pte0);
1467         } else {
1468             stq_p(spapr->htab + offset, pte0);
1469             /*
1470              * When clearing it we set PTE0 first. This ensures proper
1471              * synchronization with the reading code in
1472              * ppc_hash64_pteg_search()
1473              */
1474             smp_wmb();
1475             stq_p(spapr->htab + offset + HPTE64_DW1, pte1);
1476         }
1477     }
1478 }
1479 
1480 static void spapr_hpte_set_c(PPCVirtualHypervisor *vhyp, hwaddr ptex,
1481                              uint64_t pte1)
1482 {
1483     hwaddr offset = ptex * HASH_PTE_SIZE_64 + HPTE64_DW1_C;
1484     SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);
1485 
1486     if (!spapr->htab) {
1487         /* There should always be a hash table when this is called */
1488         error_report("spapr_hpte_set_c called with no hash table!");
1489         return;
1490     }
1491 
1492     /* The HW performs a non-atomic byte update */
1493     stb_p(spapr->htab + offset, (pte1 & 0xff) | 0x80);
1494 }
1495 
1496 static void spapr_hpte_set_r(PPCVirtualHypervisor *vhyp, hwaddr ptex,
1497                              uint64_t pte1)
1498 {
1499     hwaddr offset = ptex * HASH_PTE_SIZE_64 + HPTE64_DW1_R;
1500     SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);
1501 
1502     if (!spapr->htab) {
1503         /* There should always be a hash table when this is called */
1504         error_report("spapr_hpte_set_r called with no hash table!");
1505         return;
1506     }
1507 
1508     /* The HW performs a non-atomic byte update */
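    /* 0x01 is the Referenced (R) bit in the byte holding bits 8-15 of PTE1 */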
1509     stb_p(spapr->htab + offset, ((pte1 >> 8) & 0xff) | 0x01);
1510 }
1511 
1512 int spapr_hpt_shift_for_ramsize(uint64_t ramsize)
1513 {
1514     int shift;
1515 
1516     /* We aim for a hash table of size 1/128 the size of RAM (rounded
1517      * up).  The PAPR recommendation is actually 1/64 of RAM size, but
1518      * that's much more than is needed for Linux guests */
1519     shift = ctz64(pow2ceil(ramsize)) - 7;
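    /* e.g. 16 GiB of RAM (2^34) gives shift = 34 - 7 = 27, i.e. a 128 MiB HPT */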
1520     shift = MAX(shift, 18); /* Minimum architected size */
1521     shift = MIN(shift, 46); /* Maximum architected size */
1522     return shift;
1523 }
1524 
1525 void spapr_free_hpt(SpaprMachineState *spapr)
1526 {
1527     qemu_vfree(spapr->htab);
1528     spapr->htab = NULL;
1529     spapr->htab_shift = 0;
1530     close_htab_fd(spapr);
1531 }
1532 
1533 int spapr_reallocate_hpt(SpaprMachineState *spapr, int shift, Error **errp)
1534 {
1535     ERRP_GUARD();
1536     long rc;
1537 
1538     /* Clean up any HPT info from a previous boot */
1539     spapr_free_hpt(spapr);
1540 
1541     rc = kvmppc_reset_htab(shift);
1542 
1543     if (rc == -EOPNOTSUPP) {
1544         error_setg(errp, "HPT not supported in nested guests");
1545         return -EOPNOTSUPP;
1546     }
1547 
1548     if (rc < 0) {
1549         /* kernel-side HPT needed, but couldn't allocate one */
1550         error_setg_errno(errp, errno, "Failed to allocate KVM HPT of order %d",
1551                          shift);
1552         error_append_hint(errp, "Try smaller maxmem?\n");
1553         return -errno;
1554     } else if (rc > 0) {
1555         /* kernel-side HPT allocated */
1556         if (rc != shift) {
1557             error_setg(errp,
1558                        "Requested order %d HPT, but kernel allocated order %ld",
1559                        shift, rc);
1560             error_append_hint(errp, "Try smaller maxmem?\n");
1561             return -ENOSPC;
1562         }
1563 
1564         spapr->htab_shift = shift;
1565         spapr->htab = NULL;
1566     } else {
1567         /* kernel-side HPT not needed, allocate in userspace instead */
1568         size_t size = 1ULL << shift;
1569         int i;
1570 
1571         spapr->htab = qemu_memalign(size, size);
1572         memset(spapr->htab, 0, size);
1573         spapr->htab_shift = shift;
1574 
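        /*
         * Mark every (zeroed) HPTE dirty so that a migration starting
         * now would transmit the entire table.
         */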
1575         for (i = 0; i < size / HASH_PTE_SIZE_64; i++) {
1576             DIRTY_HPTE(HPTE(spapr->htab, i));
1577         }
1578     }
1579     /* We're setting up a hash table, so that means we're not radix */
1580     spapr->patb_entry = 0;
1581     spapr_set_all_lpcrs(0, LPCR_HR | LPCR_UPRT);
1582     return 0;
1583 }
1584 
1585 void spapr_setup_hpt(SpaprMachineState *spapr)
1586 {
1587     int hpt_shift;
1588 
1589     if (spapr->resize_hpt == SPAPR_RESIZE_HPT_DISABLED) {
1590         hpt_shift = spapr_hpt_shift_for_ramsize(MACHINE(spapr)->maxram_size);
1591     } else {
1592         uint64_t current_ram_size;
1593 
1594         current_ram_size = MACHINE(spapr)->ram_size + get_plugged_memory_size();
1595         hpt_shift = spapr_hpt_shift_for_ramsize(current_ram_size);
1596     }
1597     spapr_reallocate_hpt(spapr, hpt_shift, &error_fatal);
1598 
1599     if (kvm_enabled()) {
1600         hwaddr vrma_limit = kvmppc_vrma_limit(spapr->htab_shift);
1601 
1602         /* Check our RMA fits in the possible VRMA */
1603         if (vrma_limit < spapr->rma_size) {
1604             error_report("Unable to create %" HWADDR_PRIu
1605                          "MiB RMA (VRMA only allows %" HWADDR_PRIu "MiB)",
1606                          spapr->rma_size / MiB, vrma_limit / MiB);
1607             exit(EXIT_FAILURE);
1608         }
1609     }
1610 }
1611 
1612 void spapr_check_mmu_mode(bool guest_radix)
1613 {
1614     if (guest_radix) {
1615         if (kvm_enabled() && !kvmppc_has_cap_mmu_radix()) {
1616             error_report("Guest requested unavailable MMU mode (radix).");
1617             exit(EXIT_FAILURE);
1618         }
1619     } else {
1620         if (kvm_enabled() && kvmppc_has_cap_mmu_radix()
1621             && !kvmppc_has_cap_mmu_hash_v3()) {
1622             error_report("Guest requested unavailable MMU mode (hash).");
1623             exit(EXIT_FAILURE);
1624         }
1625     }
1626 }
1627 
1628 static void spapr_machine_reset(MachineState *machine, ShutdownCause reason)
1629 {
1630     SpaprMachineState *spapr = SPAPR_MACHINE(machine);
1631     PowerPCCPU *first_ppc_cpu;
1632     hwaddr fdt_addr;
1633     void *fdt;
1634     int rc;
1635 
1636     pef_kvm_reset(machine->cgs, &error_fatal);
1637     spapr_caps_apply(spapr);
1638 
1639     first_ppc_cpu = POWERPC_CPU(first_cpu);
1640     if (kvm_enabled() && kvmppc_has_cap_mmu_radix() &&
1641         ppc_type_check_compat(machine->cpu_type, CPU_POWERPC_LOGICAL_3_00, 0,
1642                               spapr->max_compat_pvr)) {
1643         /*
1644          * If using KVM with radix mode available, VCPUs can be started
1645          * without a HPT because KVM will start them in radix mode.
1646          * Set the GR bit in PATE so that we know there is no HPT.
1647          */
1648         spapr->patb_entry = PATE1_GR;
1649         spapr_set_all_lpcrs(LPCR_HR | LPCR_UPRT, LPCR_HR | LPCR_UPRT);
1650     } else {
1651         spapr_setup_hpt(spapr);
1652     }
1653 
1654     qemu_devices_reset(reason);
1655 
1656     spapr_ovec_cleanup(spapr->ov5_cas);
1657     spapr->ov5_cas = spapr_ovec_new();
1658 
1659     ppc_set_compat_all(spapr->max_compat_pvr, &error_fatal);
1660 
1661     /*
1662      * This fixes up some of the default configuration of the XIVE
1663      * devices. It must be called after the machine devices have been reset.
1664      */
1665     spapr_irq_reset(spapr, &error_fatal);
1666 
1667     /*
1668      * There is no CAS under qtest. Simulate one to please the code that
1669      * depends on spapr->ov5_cas. This is especially needed to test device
1670      * unplug, so we do that before resetting the DRCs.
1671      */
1672     if (qtest_enabled()) {
1673         spapr_ovec_cleanup(spapr->ov5_cas);
1674         spapr->ov5_cas = spapr_ovec_clone(spapr->ov5);
1675     }
1676 
1677     spapr_nvdimm_finish_flushes();
1678 
1679     /* DRC reset may cause a device to be unplugged. This will cause trouble
1680      * if this device is used by another device (e.g., a running vhost backend
1681      * will crash QEMU if the DIMM holding the vring goes away). To avoid such
1682      * situations, we reset DRCs after all devices have been reset.
1683      */
1684     spapr_drc_reset_all(spapr);
1685 
1686     spapr_clear_pending_events(spapr);
1687 
1688     /*
1689      * We place the device tree just below either the top of the RMA,
1690      * or just below 2GB, whichever is lower, so that it can be
1691      * processed with 32-bit real mode code if necessary
1692      */
1693     fdt_addr = MIN(spapr->rma_size, FDT_MAX_ADDR) - FDT_MAX_SIZE;
1694 
1695     fdt = spapr_build_fdt(spapr, true, FDT_MAX_SIZE);
1696     if (spapr->vof) {
1697         spapr_vof_reset(spapr, fdt, &error_fatal);
1698         /*
1699          * Do not pack the FDT as the client may change properties.
1700          * The VOF client does not expect the FDT, so we do not load it into the VM.
1701          */
1702     } else {
1703         rc = fdt_pack(fdt);
1704         /* Should only fail if we've built a corrupted tree */
1705         assert(rc == 0);
1706 
1707         spapr_cpu_set_entry_state(first_ppc_cpu, SPAPR_ENTRY_POINT,
1708                                   0, fdt_addr, 0);
1709         cpu_physical_memory_write(fdt_addr, fdt, fdt_totalsize(fdt));
1710     }
1711     qemu_fdt_dumpdtb(fdt, fdt_totalsize(fdt));
1712 
1713     g_free(spapr->fdt_blob);
1714     spapr->fdt_size = fdt_totalsize(fdt);
1715     spapr->fdt_initial_size = spapr->fdt_size;
1716     spapr->fdt_blob = fdt;
1717 
1718     /* Set machine->fdt for 'dumpdtb' QMP/HMP command */
1719     machine->fdt = fdt;
1720 
1721     /* Set up the entry state */
1722     first_ppc_cpu->env.gpr[5] = 0;
1723 
1724     spapr->fwnmi_system_reset_addr = -1;
1725     spapr->fwnmi_machine_check_addr = -1;
1726     spapr->fwnmi_machine_check_interlock = -1;
1727 
1728     /* Signal all vCPUs waiting on this condition */
1729     qemu_cond_broadcast(&spapr->fwnmi_machine_check_interlock_cond);
1730 
1731     migrate_del_blocker(spapr->fwnmi_migration_blocker);
1732 }
1733 
1734 static void spapr_create_nvram(SpaprMachineState *spapr)
1735 {
1736     DeviceState *dev = qdev_new("spapr-nvram");
1737     DriveInfo *dinfo = drive_get(IF_PFLASH, 0, 0);
1738 
1739     if (dinfo) {
1740         qdev_prop_set_drive_err(dev, "drive", blk_by_legacy_dinfo(dinfo),
1741                                 &error_fatal);
1742     }
1743 
1744     qdev_realize_and_unref(dev, &spapr->vio_bus->bus, &error_fatal);
1745 
1746     spapr->nvram = (struct SpaprNvram *)dev;
1747 }
1748 
1749 static void spapr_rtc_create(SpaprMachineState *spapr)
1750 {
1751     object_initialize_child_with_props(OBJECT(spapr), "rtc", &spapr->rtc,
1752                                        sizeof(spapr->rtc), TYPE_SPAPR_RTC,
1753                                        &error_fatal, NULL);
1754     qdev_realize(DEVICE(&spapr->rtc), NULL, &error_fatal);
1755     object_property_add_alias(OBJECT(spapr), "rtc-time", OBJECT(&spapr->rtc),
1756                               "date");
1757 }
1758 
1759 /* Returns whether we want to use VGA or not */
1760 static bool spapr_vga_init(PCIBus *pci_bus, Error **errp)
1761 {
1762     vga_interface_created = true;
1763     switch (vga_interface_type) {
1764     case VGA_NONE:
1765         return false;
1766     case VGA_DEVICE:
1767         return true;
1768     case VGA_STD:
1769     case VGA_VIRTIO:
1770     case VGA_CIRRUS:
1771         return pci_vga_init(pci_bus) != NULL;
1772     default:
1773         error_setg(errp,
1774                    "Unsupported VGA mode, only -vga std or -vga virtio is supported");
1775         return false;
1776     }
1777 }
1778 
1779 static int spapr_pre_load(void *opaque)
1780 {
1781     int rc;
1782 
1783     rc = spapr_caps_pre_load(opaque);
1784     if (rc) {
1785         return rc;
1786     }
1787 
1788     return 0;
1789 }
1790 
1791 static int spapr_post_load(void *opaque, int version_id)
1792 {
1793     SpaprMachineState *spapr = (SpaprMachineState *)opaque;
1794     int err = 0;
1795 
1796     err = spapr_caps_post_migration(spapr);
1797     if (err) {
1798         return err;
1799     }
1800 
1801     /*
1802      * In earlier versions, there was no separate qdev for the PAPR
1803      * RTC, so the RTC offset was stored directly in sPAPREnvironment.
1804      * So when migrating from those versions, poke the incoming offset
1805      * value into the RTC device
1806      */
1807     if (version_id < 3) {
1808         err = spapr_rtc_import_offset(&spapr->rtc, spapr->rtc_offset);
1809         if (err) {
1810             return err;
1811         }
1812     }
1813 
1814     if (kvm_enabled() && spapr->patb_entry) {
1815         PowerPCCPU *cpu = POWERPC_CPU(first_cpu);
1816         bool radix = !!(spapr->patb_entry & PATE1_GR);
1817         bool gtse = !!(cpu->env.spr[SPR_LPCR] & LPCR_GTSE);
1818 
1819         /*
1820          * Update LPCR:HR and UPRT as they may not be set properly in
1821          * the stream
1822          */
1823         spapr_set_all_lpcrs(radix ? (LPCR_HR | LPCR_UPRT) : 0,
1824                             LPCR_HR | LPCR_UPRT);
1825 
1826         err = kvmppc_configure_v3_mmu(cpu, radix, gtse, spapr->patb_entry);
1827         if (err) {
1828             error_report("Process table config unsupported by the host");
1829             return -EINVAL;
1830         }
1831     }
1832 
1833     err = spapr_irq_post_load(spapr, version_id);
1834     if (err) {
1835         return err;
1836     }
1837 
1838     return err;
1839 }
1840 
1841 static int spapr_pre_save(void *opaque)
1842 {
1843     int rc;
1844 
1845     rc = spapr_caps_pre_save(opaque);
1846     if (rc) {
1847         return rc;
1848     }
1849 
1850     return 0;
1851 }
1852 
1853 static bool version_before_3(void *opaque, int version_id)
1854 {
1855     return version_id < 3;
1856 }
1857 
1858 static bool spapr_pending_events_needed(void *opaque)
1859 {
1860     SpaprMachineState *spapr = (SpaprMachineState *)opaque;
1861     return !QTAILQ_EMPTY(&spapr->pending_events);
1862 }
1863 
1864 static const VMStateDescription vmstate_spapr_event_entry = {
1865     .name = "spapr_event_log_entry",
1866     .version_id = 1,
1867     .minimum_version_id = 1,
1868     .fields = (VMStateField[]) {
1869         VMSTATE_UINT32(summary, SpaprEventLogEntry),
1870         VMSTATE_UINT32(extended_length, SpaprEventLogEntry),
1871         VMSTATE_VBUFFER_ALLOC_UINT32(extended_log, SpaprEventLogEntry, 0,
1872                                      NULL, extended_length),
1873         VMSTATE_END_OF_LIST()
1874     },
1875 };
1876 
1877 static const VMStateDescription vmstate_spapr_pending_events = {
1878     .name = "spapr_pending_events",
1879     .version_id = 1,
1880     .minimum_version_id = 1,
1881     .needed = spapr_pending_events_needed,
1882     .fields = (VMStateField[]) {
1883         VMSTATE_QTAILQ_V(pending_events, SpaprMachineState, 1,
1884                          vmstate_spapr_event_entry, SpaprEventLogEntry, next),
1885         VMSTATE_END_OF_LIST()
1886     },
1887 };
1888 
1889 static bool spapr_ov5_cas_needed(void *opaque)
1890 {
1891     SpaprMachineState *spapr = opaque;
1892     SpaprOptionVector *ov5_mask = spapr_ovec_new();
1893     bool cas_needed;
1894 
1895     /* Prior to the introduction of SpaprOptionVector, we had two option
1896      * vectors we dealt with: OV5_FORM1_AFFINITY, and OV5_DRCONF_MEMORY.
1897      * Both of these options encode machine topology into the device-tree
1898      * in such a way that the now-booted OS should still be able to interact
1899      * appropriately with QEMU regardless of what options were actually
1900      * negotiatied on the source side.
1901      * negotiated on the source side.
1902      * As such, we can avoid migrating the CAS-negotiated options if these
1903      * are the only options available on the current machine/platform.
1904      * Since these are the only options available for pseries-2.7 and
1905      * earlier, this allows us to maintain old->new/new->old migration
1906      * compatibility.
1907      *
1908      * For QEMU 2.8+, there are additional CAS-negotiatable options available
1909      * For QEMU 2.8+, there are additional CAS-negotiable options available
1910      * Some of these options, like OV5_HP_EVT, *do* require QEMU to be aware
1911      * of the actual CAS-negotiated values to continue working properly. For
1912      * example, availability of memory unplug depends on knowing whether
1913      * OV5_HP_EVT was negotiated via CAS.
1914      *
1915      * Thus, for any cases where the set of available CAS-negotiable
1916      * options extends beyond OV5_FORM1_AFFINITY and OV5_DRCONF_MEMORY, we
1917      * include the CAS-negotiated options in the migration stream, unless
1918      * they affect boot-time behaviour only.
1919      */
1920     spapr_ovec_set(ov5_mask, OV5_FORM1_AFFINITY);
1921     spapr_ovec_set(ov5_mask, OV5_DRCONF_MEMORY);
1922     spapr_ovec_set(ov5_mask, OV5_DRMEM_V2);
1923 
1924     /* We need extra information if we have any bits outside the mask
1925      * defined above */
1926     cas_needed = !spapr_ovec_subset(spapr->ov5, ov5_mask);
1927 
1928     spapr_ovec_cleanup(ov5_mask);
1929 
1930     return cas_needed;
1931 }
1932 
1933 static const VMStateDescription vmstate_spapr_ov5_cas = {
1934     .name = "spapr_option_vector_ov5_cas",
1935     .version_id = 1,
1936     .minimum_version_id = 1,
1937     .needed = spapr_ov5_cas_needed,
1938     .fields = (VMStateField[]) {
1939         VMSTATE_STRUCT_POINTER_V(ov5_cas, SpaprMachineState, 1,
1940                                  vmstate_spapr_ovec, SpaprOptionVector),
1941         VMSTATE_END_OF_LIST()
1942     },
1943 };
1944 
1945 static bool spapr_patb_entry_needed(void *opaque)
1946 {
1947     SpaprMachineState *spapr = opaque;
1948 
1949     return !!spapr->patb_entry;
1950 }
1951 
1952 static const VMStateDescription vmstate_spapr_patb_entry = {
1953     .name = "spapr_patb_entry",
1954     .version_id = 1,
1955     .minimum_version_id = 1,
1956     .needed = spapr_patb_entry_needed,
1957     .fields = (VMStateField[]) {
1958         VMSTATE_UINT64(patb_entry, SpaprMachineState),
1959         VMSTATE_END_OF_LIST()
1960     },
1961 };
1962 
1963 static bool spapr_irq_map_needed(void *opaque)
1964 {
1965     SpaprMachineState *spapr = opaque;
1966 
1967     return spapr->irq_map && !bitmap_empty(spapr->irq_map, spapr->irq_map_nr);
1968 }
1969 
1970 static const VMStateDescription vmstate_spapr_irq_map = {
1971     .name = "spapr_irq_map",
1972     .version_id = 1,
1973     .minimum_version_id = 1,
1974     .needed = spapr_irq_map_needed,
1975     .fields = (VMStateField[]) {
1976         VMSTATE_BITMAP(irq_map, SpaprMachineState, 0, irq_map_nr),
1977         VMSTATE_END_OF_LIST()
1978     },
1979 };
1980 
1981 static bool spapr_dtb_needed(void *opaque)
1982 {
1983     SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(opaque);
1984 
1985     return smc->update_dt_enabled;
1986 }
1987 
1988 static int spapr_dtb_pre_load(void *opaque)
1989 {
1990     SpaprMachineState *spapr = (SpaprMachineState *)opaque;
1991 
1992     g_free(spapr->fdt_blob);
1993     spapr->fdt_blob = NULL;
1994     spapr->fdt_size = 0;
1995 
1996     return 0;
1997 }
1998 
1999 static const VMStateDescription vmstate_spapr_dtb = {
2000     .name = "spapr_dtb",
2001     .version_id = 1,
2002     .minimum_version_id = 1,
2003     .needed = spapr_dtb_needed,
2004     .pre_load = spapr_dtb_pre_load,
2005     .fields = (VMStateField[]) {
2006         VMSTATE_UINT32(fdt_initial_size, SpaprMachineState),
2007         VMSTATE_UINT32(fdt_size, SpaprMachineState),
2008         VMSTATE_VBUFFER_ALLOC_UINT32(fdt_blob, SpaprMachineState, 0, NULL,
2009                                      fdt_size),
2010         VMSTATE_END_OF_LIST()
2011     },
2012 };
2013 
2014 static bool spapr_fwnmi_needed(void *opaque)
2015 {
2016     SpaprMachineState *spapr = (SpaprMachineState *)opaque;
2017 
2018     return spapr->fwnmi_machine_check_addr != -1;
2019 }
2020 
2021 static int spapr_fwnmi_pre_save(void *opaque)
2022 {
2023     SpaprMachineState *spapr = (SpaprMachineState *)opaque;
2024 
2025     /*
2026      * Check if machine check handling is in progress and print a
2027      * warning message.
2028      */
2029     if (spapr->fwnmi_machine_check_interlock != -1) {
2030         warn_report("A machine check is being handled during migration. The "
2031                     "handler may run and log a hardware error on the destination");
2032     }
2033 
2034     return 0;
2035 }
2036 
2037 static const VMStateDescription vmstate_spapr_fwnmi = {
2038     .name = "spapr_fwnmi",
2039     .version_id = 1,
2040     .minimum_version_id = 1,
2041     .needed = spapr_fwnmi_needed,
2042     .pre_save = spapr_fwnmi_pre_save,
2043     .fields = (VMStateField[]) {
2044         VMSTATE_UINT64(fwnmi_system_reset_addr, SpaprMachineState),
2045         VMSTATE_UINT64(fwnmi_machine_check_addr, SpaprMachineState),
2046         VMSTATE_INT32(fwnmi_machine_check_interlock, SpaprMachineState),
2047         VMSTATE_END_OF_LIST()
2048     },
2049 };
2050 
2051 static const VMStateDescription vmstate_spapr = {
2052     .name = "spapr",
2053     .version_id = 3,
2054     .minimum_version_id = 1,
2055     .pre_load = spapr_pre_load,
2056     .post_load = spapr_post_load,
2057     .pre_save = spapr_pre_save,
2058     .fields = (VMStateField[]) {
2059         /* used to be @next_irq */
2060         VMSTATE_UNUSED_BUFFER(version_before_3, 0, 4),
2061 
2062         /* RTC offset */
2063         VMSTATE_UINT64_TEST(rtc_offset, SpaprMachineState, version_before_3),
2064 
2065         VMSTATE_PPC_TIMEBASE_V(tb, SpaprMachineState, 2),
2066         VMSTATE_END_OF_LIST()
2067     },
2068     .subsections = (const VMStateDescription*[]) {
2069         &vmstate_spapr_ov5_cas,
2070         &vmstate_spapr_patb_entry,
2071         &vmstate_spapr_pending_events,
2072         &vmstate_spapr_cap_htm,
2073         &vmstate_spapr_cap_vsx,
2074         &vmstate_spapr_cap_dfp,
2075         &vmstate_spapr_cap_cfpc,
2076         &vmstate_spapr_cap_sbbc,
2077         &vmstate_spapr_cap_ibs,
2078         &vmstate_spapr_cap_hpt_maxpagesize,
2079         &vmstate_spapr_irq_map,
2080         &vmstate_spapr_cap_nested_kvm_hv,
2081         &vmstate_spapr_dtb,
2082         &vmstate_spapr_cap_large_decr,
2083         &vmstate_spapr_cap_ccf_assist,
2084         &vmstate_spapr_cap_fwnmi,
2085         &vmstate_spapr_fwnmi,
2086         &vmstate_spapr_cap_rpt_invalidate,
2087         NULL
2088     }
2089 };
2090 
2091 static int htab_save_setup(QEMUFile *f, void *opaque)
2092 {
2093     SpaprMachineState *spapr = opaque;
2094 
2095     /*
          * "Iteration" header: the HPT size (htab_shift) so that the
          * destination can pre-allocate a matching HPT, or -1 if there
          * is no HPT at all (e.g. a radix guest).
          */
2096     if (!spapr->htab_shift) {
2097         qemu_put_be32(f, -1);
2098     } else {
2099         qemu_put_be32(f, spapr->htab_shift);
2100     }
2101 
2102     if (spapr->htab) {
2103         spapr->htab_save_index = 0;
2104         spapr->htab_first_pass = true;
2105     } else {
2106         if (spapr->htab_shift) {
2107             assert(kvm_enabled());
2108         }
2109     }
2110 
2112     return 0;
2113 }
2114 
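/*
 * Stream format for a chunk: a 32-bit starting HPTE index, a 16-bit
 * count of valid entries, a 16-bit count of invalid entries, followed
 * by the raw contents of the n_valid valid HPTEs. Invalid entries are
 * not transferred; htab_load() zeroes or invalidates them instead.
 */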
2115 static void htab_save_chunk(QEMUFile *f, SpaprMachineState *spapr,
2116                             int chunkstart, int n_valid, int n_invalid)
2117 {
2118     qemu_put_be32(f, chunkstart);
2119     qemu_put_be16(f, n_valid);
2120     qemu_put_be16(f, n_invalid);
2121     qemu_put_buffer(f, HPTE(spapr->htab, chunkstart),
2122                     HASH_PTE_SIZE_64 * n_valid);
2123 }
2124 
2125 static void htab_save_end_marker(QEMUFile *f)
2126 {
2127     qemu_put_be32(f, 0);
2128     qemu_put_be16(f, 0);
2129     qemu_put_be16(f, 0);
2130 }
2131 
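/*
 * The first pass walks the whole HPT, clearing dirty bits as it goes,
 * and sends every valid entry; htab_save_later_pass() then only has
 * to send entries dirtied since they were last transmitted.
 */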
2132 static void htab_save_first_pass(QEMUFile *f, SpaprMachineState *spapr,
2133                                  int64_t max_ns)
2134 {
2135     bool has_timeout = max_ns != -1;
2136     int htabslots = HTAB_SIZE(spapr) / HASH_PTE_SIZE_64;
2137     int index = spapr->htab_save_index;
2138     int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
2139 
2140     assert(spapr->htab_first_pass);
2141 
2142     do {
2143         int chunkstart;
2144 
2145         /* Consume invalid HPTEs */
2146         while ((index < htabslots)
2147                && !HPTE_VALID(HPTE(spapr->htab, index))) {
2148             CLEAN_HPTE(HPTE(spapr->htab, index));
2149             index++;
2150         }
2151 
2152         /* Consume valid HPTEs */
2153         chunkstart = index;
2154         while ((index < htabslots) && (index - chunkstart < USHRT_MAX)
2155                && HPTE_VALID(HPTE(spapr->htab, index))) {
2156             CLEAN_HPTE(HPTE(spapr->htab, index));
2157             index++;
2158         }
2159 
2160         if (index > chunkstart) {
2161             int n_valid = index - chunkstart;
2162 
2163             htab_save_chunk(f, spapr, chunkstart, n_valid, 0);
2164 
2165             if (has_timeout &&
2166                 (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) > max_ns) {
2167                 break;
2168             }
2169         }
2170     } while ((index < htabslots) && !migration_rate_exceeded(f));
2171 
2172     if (index >= htabslots) {
2173         assert(index == htabslots);
2174         index = 0;
2175         spapr->htab_first_pass = false;
2176     }
2177     spapr->htab_save_index = index;
2178 }
2179 
2180 static int htab_save_later_pass(QEMUFile *f, SpaprMachineState *spapr,
2181                                 int64_t max_ns)
2182 {
2183     bool final = max_ns < 0;
2184     int htabslots = HTAB_SIZE(spapr) / HASH_PTE_SIZE_64;
2185     int examined = 0, sent = 0;
2186     int index = spapr->htab_save_index;
2187     int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
2188 
2189     assert(!spapr->htab_first_pass);
2190 
2191     do {
2192         int chunkstart, invalidstart;
2193 
2194         /* Consume non-dirty HPTEs */
2195         while ((index < htabslots)
2196                && !HPTE_DIRTY(HPTE(spapr->htab, index))) {
2197             index++;
2198             examined++;
2199         }
2200 
2201         chunkstart = index;
2202         /* Consume valid dirty HPTEs */
2203         while ((index < htabslots) && (index - chunkstart < USHRT_MAX)
2204                && HPTE_DIRTY(HPTE(spapr->htab, index))
2205                && HPTE_VALID(HPTE(spapr->htab, index))) {
2206             CLEAN_HPTE(HPTE(spapr->htab, index));
2207             index++;
2208             examined++;
2209         }
2210 
2211         invalidstart = index;
2212         /* Consume invalid dirty HPTEs */
2213         while ((index < htabslots) && (index - invalidstart < USHRT_MAX)
2214                && HPTE_DIRTY(HPTE(spapr->htab, index))
2215                && !HPTE_VALID(HPTE(spapr->htab, index))) {
2216             CLEAN_HPTE(HPTE(spapr->htab, index));
2217             index++;
2218             examined++;
2219         }
2220 
2221         if (index > chunkstart) {
2222             int n_valid = invalidstart - chunkstart;
2223             int n_invalid = index - invalidstart;
2224 
2225             htab_save_chunk(f, spapr, chunkstart, n_valid, n_invalid);
2226             sent += index - chunkstart;
2227 
2228             if (!final && (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) > max_ns) {
2229                 break;
2230             }
2231         }
2232 
2233         if (examined >= htabslots) {
2234             break;
2235         }
2236 
2237         if (index >= htabslots) {
2238             assert(index == htabslots);
2239             index = 0;
2240         }
2241     } while ((examined < htabslots) && (!migration_rate_exceeded(f) || final));
2242 
2243     if (index >= htabslots) {
2244         assert(index == htabslots);
2245         index = 0;
2246     }
2247 
2248     spapr->htab_save_index = index;
2249 
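    /* Report completion (1) only after a full scan that sent nothing */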
2250     return (examined >= htabslots) && (sent == 0) ? 1 : 0;
2251 }
2252 
2253 #define MAX_ITERATION_NS    5000000 /* 5 ms */
2254 #define MAX_KVM_BUF_SIZE    2048
2255 
2256 static int htab_save_iterate(QEMUFile *f, void *opaque)
2257 {
2258     SpaprMachineState *spapr = opaque;
2259     int fd;
2260     int rc = 0;
2261 
2262     /* Iteration header: 0 introduces a run of chunks, -1 means no HPT */
2263     if (!spapr->htab_shift) {
2264         qemu_put_be32(f, -1);
2265         return 1;
2266     } else {
2267         qemu_put_be32(f, 0);
2268     }
2269 
2270     if (!spapr->htab) {
2271         assert(kvm_enabled());
2272 
2273         fd = get_htab_fd(spapr);
2274         if (fd < 0) {
2275             return fd;
2276         }
2277 
2278         rc = kvmppc_save_htab(f, fd, MAX_KVM_BUF_SIZE, MAX_ITERATION_NS);
2279         if (rc < 0) {
2280             return rc;
2281         }
2282     } else if (spapr->htab_first_pass) {
2283         htab_save_first_pass(f, spapr, MAX_ITERATION_NS);
2284     } else {
2285         rc = htab_save_later_pass(f, spapr, MAX_ITERATION_NS);
2286     }
2287 
2288     htab_save_end_marker(f);
2289 
2290     return rc;
2291 }
2292 
2293 static int htab_save_complete(QEMUFile *f, void *opaque)
2294 {
2295     SpaprMachineState *spapr = opaque;
2296     int fd;
2297 
2298     /* Iteration header */
2299     /* Iteration header: 0 introduces a run of chunks, -1 means no HPT */
2300         qemu_put_be32(f, -1);
2301         return 0;
2302     } else {
2303         qemu_put_be32(f, 0);
2304     }
2305 
2306     if (!spapr->htab) {
2307         int rc;
2308 
2309         assert(kvm_enabled());
2310 
2311         fd = get_htab_fd(spapr);
2312         if (fd < 0) {
2313             return fd;
2314         }
2315 
2316         rc = kvmppc_save_htab(f, fd, MAX_KVM_BUF_SIZE, -1);
2317         if (rc < 0) {
2318             return rc;
2319         }
2320     } else {
2321         if (spapr->htab_first_pass) {
2322             htab_save_first_pass(f, spapr, -1);
2323         }
2324         htab_save_later_pass(f, spapr, -1);
2325     }
2326 
2327     /* End marker */
2328     htab_save_end_marker(f);
2329 
2330     return 0;
2331 }
2332 
2333 static int htab_load(QEMUFile *f, void *opaque, int version_id)
2334 {
2335     SpaprMachineState *spapr = opaque;
2336     uint32_t section_hdr;
2337     int fd = -1;
2338     Error *local_err = NULL;
2339 
2340     if (version_id < 1 || version_id > 1) {
2341         error_report("htab_load() bad version");
2342         return -EINVAL;
2343     }
2344 
2345     section_hdr = qemu_get_be32(f);
2346 
2347     if (section_hdr == -1) {
2348         spapr_free_hpt(spapr);
2349         return 0;
2350     }
2351 
2352     if (section_hdr) {
2353         int ret;
2354 
2355         /* First section gives the htab size */
2356         ret = spapr_reallocate_hpt(spapr, section_hdr, &local_err);
2357         if (ret < 0) {
2358             error_report_err(local_err);
2359             return ret;
2360         }
2361         return 0;
2362     }
2363 
2364     if (!spapr->htab) {
2365         assert(kvm_enabled());
2366 
2367         fd = kvmppc_get_htab_fd(true, 0, &local_err);
2368         if (fd < 0) {
2369             error_report_err(local_err);
2370             return fd;
2371         }
2372     }
2373 
2374     while (true) {
2375         uint32_t index;
2376         uint16_t n_valid, n_invalid;
2377 
2378         index = qemu_get_be32(f);
2379         n_valid = qemu_get_be16(f);
2380         n_invalid = qemu_get_be16(f);
2381 
2382         if ((index == 0) && (n_valid == 0) && (n_invalid == 0)) {
2383             /* End of Stream */
2384             break;
2385         }
2386 
2387         if ((index + n_valid + n_invalid) >
2388             (HTAB_SIZE(spapr) / HASH_PTE_SIZE_64)) {
2389             /* Bad index in stream */
2390             error_report(
2391                 "htab_load() bad index %d (%hu+%hu entries) in htab stream (htab_shift=%d)",
2392                 index, n_valid, n_invalid, spapr->htab_shift);
2393             return -EINVAL;
2394         }
2395 
2396         if (spapr->htab) {
2397             if (n_valid) {
2398                 qemu_get_buffer(f, HPTE(spapr->htab, index),
2399                                 HASH_PTE_SIZE_64 * n_valid);
2400             }
2401             if (n_invalid) {
2402                 memset(HPTE(spapr->htab, index + n_valid), 0,
2403                        HASH_PTE_SIZE_64 * n_invalid);
2404             }
2405         } else {
2406             int rc;
2407 
2408             assert(fd >= 0);
2409 
2410             rc = kvmppc_load_htab_chunk(f, fd, index, n_valid, n_invalid,
2411                                         &local_err);
2412             if (rc < 0) {
2413                 error_report_err(local_err);
2414                 return rc;
2415             }
2416         }
2417     }
2418 
2419     if (!spapr->htab) {
2420         assert(fd >= 0);
2421         close(fd);
2422     }
2423 
2424     return 0;
2425 }
2426 
2427 static void htab_save_cleanup(void *opaque)
2428 {
2429     SpaprMachineState *spapr = opaque;
2430 
2431     close_htab_fd(spapr);
2432 }
2433 
2434 static SaveVMHandlers savevm_htab_handlers = {
2435     .save_setup = htab_save_setup,
2436     .save_live_iterate = htab_save_iterate,
2437     .save_live_complete_precopy = htab_save_complete,
2438     .save_cleanup = htab_save_cleanup,
2439     .load_state = htab_load,
2440 };
2441 
2442 static void spapr_boot_set(void *opaque, const char *boot_device,
2443                            Error **errp)
2444 {
2445     SpaprMachineState *spapr = SPAPR_MACHINE(opaque);
2446 
2447     g_free(spapr->boot_device);
2448     spapr->boot_device = g_strdup(boot_device);
2449 }
2450 
2451 static void spapr_create_lmb_dr_connectors(SpaprMachineState *spapr)
2452 {
2453     MachineState *machine = MACHINE(spapr);
2454     uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE;
2455     uint32_t nr_lmbs = (machine->maxram_size - machine->ram_size) / lmb_size;
2456     int i;
2457 
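    /*
     * One DRC per SPAPR_MEMORY_BLOCK_SIZE (256 MiB) LMB in the
     * hotpluggable (maxram - ram) range, indexed by the LMB's address
     * in units of the LMB size.
     */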
2458     for (i = 0; i < nr_lmbs; i++) {
2459         uint64_t addr;
2460 
2461         addr = i * lmb_size + machine->device_memory->base;
2462         spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_LMB,
2463                                addr / lmb_size);
2464     }
2465 }
2466 
2467 /*
2468  * If RAM size, maxmem size and individual node mem sizes aren't aligned
2469  * to SPAPR_MEMORY_BLOCK_SIZE (256 MiB), then refuse to start the guest
2470  * since we can't support such unaligned sizes with DRCONF_MEMORY.
2471  */
2472 static void spapr_validate_node_memory(MachineState *machine, Error **errp)
2473 {
2474     int i;
2475 
2476     if (machine->ram_size % SPAPR_MEMORY_BLOCK_SIZE) {
2477         error_setg(errp, "Memory size 0x" RAM_ADDR_FMT
2478                    " is not aligned to %" PRIu64 " MiB",
2479                    machine->ram_size,
2480                    SPAPR_MEMORY_BLOCK_SIZE / MiB);
2481         return;
2482     }
2483 
2484     if (machine->maxram_size % SPAPR_MEMORY_BLOCK_SIZE) {
2485         error_setg(errp, "Maximum memory size 0x" RAM_ADDR_FMT
2486                    " is not aligned to %" PRIu64 " MiB",
2487                    machine->maxram_size,
2488                    SPAPR_MEMORY_BLOCK_SIZE / MiB);
2489         return;
2490     }
2491 
2492     for (i = 0; i < machine->numa_state->num_nodes; i++) {
2493         if (machine->numa_state->nodes[i].node_mem % SPAPR_MEMORY_BLOCK_SIZE) {
2494             error_setg(errp,
2495                        "Node %d memory size 0x%" PRIx64
2496                        " is not aligned to %" PRIu64 " MiB",
2497                        i, machine->numa_state->nodes[i].node_mem,
2498                        SPAPR_MEMORY_BLOCK_SIZE / MiB);
2499             return;
2500         }
2501     }
2502 }
2503 
2504 /* find cpu slot in machine->possible_cpus by core_id */
2505 static CPUArchId *spapr_find_cpu_slot(MachineState *ms, uint32_t id, int *idx)
2506 {
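    /* core_id is the id of the core's first thread, hence a multiple of smp.threads */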
2507     int index = id / ms->smp.threads;
2508 
2509     if (index >= ms->possible_cpus->len) {
2510         return NULL;
2511     }
2512     if (idx) {
2513         *idx = index;
2514     }
2515     return &ms->possible_cpus->cpus[index];
2516 }
2517 
2518 static void spapr_set_vsmt_mode(SpaprMachineState *spapr, Error **errp)
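/*
 * VSMT ("virtual SMT") is the stride between the VCPU ids of
 * consecutive cores. It must be at least threads/core, and with KVM
 * it should match (or at least be a multiple of) the host SMT mode.
 */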
2519 {
2520     MachineState *ms = MACHINE(spapr);
2521     SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
2522     Error *local_err = NULL;
2523     bool vsmt_user = !!spapr->vsmt;
2524     int kvm_smt = kvmppc_smt_threads();
2525     int ret;
2526     unsigned int smp_threads = ms->smp.threads;
2527 
2528     if (tcg_enabled()) {
2529         if (smp_threads > 1 &&
2530             !ppc_type_check_compat(ms->cpu_type, CPU_POWERPC_LOGICAL_2_07, 0,
2531                                    spapr->max_compat_pvr)) {
2532             error_setg(errp, "TCG only supports SMT on POWER8 or newer CPUs");
2533             return;
2534         }
2535 
2536         if (smp_threads > 8) {
2537             error_setg(errp, "TCG cannot support more than 8 threads/core "
2538                        "on a pseries machine");
2539             return;
2540         }
2541     }
2542     if (!is_power_of_2(smp_threads)) {
2543         error_setg(errp, "Cannot support %d threads/core on a pseries "
2544                    "machine because it must be a power of 2", smp_threads);
2545         return;
2546     }
2547 
2548     /* Determine the VSMT mode to use: */
2549     if (vsmt_user) {
2550         if (spapr->vsmt < smp_threads) {
2551             error_setg(errp, "Cannot support VSMT mode %d"
2552                        " because it must be >= threads/core (%d)",
2553                        spapr->vsmt, smp_threads);
2554             return;
2555         }
2556         /* In this case, spapr->vsmt has been set by the command line */
2557     } else if (!smc->smp_threads_vsmt) {
2558         /*
2559          * Default VSMT value is tricky, because we need it to be as
2560          * consistent as possible (for migration), but this requires
2561          * changing it for at least some existing cases.  We pick 8 as
2562          * the value that we'd get with KVM on POWER8, the
2563          * overwhelmingly common case in production systems.
2564          */
2565         spapr->vsmt = MAX(8, smp_threads);
2566     } else {
2567         spapr->vsmt = smp_threads;
2568     }
2569 
2570     /* KVM: If necessary, set the SMT mode: */
2571     if (kvm_enabled() && (spapr->vsmt != kvm_smt)) {
2572         ret = kvmppc_set_smt_threads(spapr->vsmt);
2573         if (ret) {
2574             /* Looks like KVM isn't able to change VSMT mode */
2575             error_setg(&local_err,
2576                        "Failed to set KVM's VSMT mode to %d (errno %d)",
2577                        spapr->vsmt, ret);
2578             /* We can live with that if the default one is big enough
2579              * for the number of threads, and a submultiple of the one
2580              * we want.  In this case we'll waste some vcpu ids, but
2581              * behaviour will be correct */
2582             if ((kvm_smt >= smp_threads) && ((spapr->vsmt % kvm_smt) == 0)) {
2583                 warn_report_err(local_err);
2584             } else {
2585                 if (!vsmt_user) {
2586                     error_append_hint(&local_err,
2587                                       "On PPC, a VM with %d threads/core"
2588                                       " on a host with %d threads/core"
2589                                       " requires the use of VSMT mode %d.\n",
2590                                       smp_threads, kvm_smt, spapr->vsmt);
2591                 }
2592                 kvmppc_error_append_smt_possible_hint(&local_err);
2593                 error_propagate(errp, local_err);
2594             }
2595         }
2596     }
2597     /* else TCG: nothing to do currently */
2598 }
2599 
2600 static void spapr_init_cpus(SpaprMachineState *spapr)
2601 {
2602     MachineState *machine = MACHINE(spapr);
2603     MachineClass *mc = MACHINE_GET_CLASS(machine);
2604     SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);
2605     const char *type = spapr_get_cpu_core_type(machine->cpu_type);
2606     const CPUArchIdList *possible_cpus;
2607     unsigned int smp_cpus = machine->smp.cpus;
2608     unsigned int smp_threads = machine->smp.threads;
2609     unsigned int max_cpus = machine->smp.max_cpus;
2610     int boot_cores_nr = smp_cpus / smp_threads;
2611     int i;
2612 
2613     possible_cpus = mc->possible_cpu_arch_ids(machine);
2614     if (mc->has_hotpluggable_cpus) {
2615         if (smp_cpus % smp_threads) {
2616             error_report("smp_cpus (%u) must be multiple of threads (%u)",
2617                          smp_cpus, smp_threads);
2618             exit(1);
2619         }
2620         if (max_cpus % smp_threads) {
2621             error_report("max_cpus (%u) must be multiple of threads (%u)",
2622                          max_cpus, smp_threads);
2623             exit(1);
2624         }
2625     } else {
2626         if (max_cpus != smp_cpus) {
2627             error_report("This machine version does not support CPU hotplug");
2628             exit(1);
2629         }
2630         boot_cores_nr = possible_cpus->len;
2631     }
2632 
2633     if (smc->pre_2_10_has_unused_icps) {
2634         int i;
2635 
2636         for (i = 0; i < spapr_max_server_number(spapr); i++) {
2637             /* Dummy entries get deregistered when real ICPState objects
2638              * are registered during CPU core hotplug.
2639              */
2640             pre_2_10_vmstate_register_dummy_icp(i);
2641         }
2642     }
2643 
2644     for (i = 0; i < possible_cpus->len; i++) {
2645         int core_id = i * smp_threads;
2646 
2647         if (mc->has_hotpluggable_cpus) {
2648             spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_CPU,
2649                                    spapr_vcpu_id(spapr, core_id));
2650         }
2651 
2652         if (i < boot_cores_nr) {
2653             Object *core  = object_new(type);
2654             int nr_threads = smp_threads;
2655 
2656             /* Handle the partially filled core for older machine types */
2657             if ((i + 1) * smp_threads >= smp_cpus) {
2658                 nr_threads = smp_cpus - i * smp_threads;
2659             }
2660 
2661             object_property_set_int(core, "nr-threads", nr_threads,
2662                                     &error_fatal);
2663             object_property_set_int(core, CPU_CORE_PROP_CORE_ID, core_id,
2664                                     &error_fatal);
2665             qdev_realize(DEVICE(core), NULL, &error_fatal);
2666 
2667             object_unref(core);
2668         }
2669     }
2670 }
2671 
2672 static PCIHostState *spapr_create_default_phb(void)
2673 {
2674     DeviceState *dev;
2675 
2676     dev = qdev_new(TYPE_SPAPR_PCI_HOST_BRIDGE);
2677     qdev_prop_set_uint32(dev, "index", 0);
2678     sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
2679 
2680     return PCI_HOST_BRIDGE(dev);
2681 }
2682 
2683 static hwaddr spapr_rma_size(SpaprMachineState *spapr, Error **errp)
2684 {
2685     MachineState *machine = MACHINE(spapr);
2686     SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
2687     hwaddr rma_size = machine->ram_size;
2688     hwaddr node0_size = spapr_node0_size(machine);
2689 
2690     /* RMA has to fit in the first NUMA node */
2691     rma_size = MIN(rma_size, node0_size);
2692 
2693     /*
2694      * VRMA access is via a special 1TiB SLB mapping, so the RMA can
2695      * never exceed that
2696      */
2697     rma_size = MIN(rma_size, 1 * TiB);
2698 
2699     /*
2700      * Clamp the RMA size based on machine type.  This is for
2701      * migration compatibility with older qemu versions, which limited
2702      * the RMA size for complicated and mostly bad reasons.
2703      */
2704     if (smc->rma_limit) {
2705         rma_size = MIN(rma_size, smc->rma_limit);
2706     }
2707 
2708     if (rma_size < MIN_RMA_SLOF) {
2709         error_setg(errp,
2710                    "pSeries SLOF firmware requires >= %" HWADDR_PRIu
2711                    "MiB guest RMA (Real Mode Area memory)",
2712                    MIN_RMA_SLOF / MiB);
2713         return 0;
2714     }
2715 
2716     return rma_size;
2717 }
2718 
2719 static void spapr_create_nvdimm_dr_connectors(SpaprMachineState *spapr)
2720 {
2721     MachineState *machine = MACHINE(spapr);
2722     int i;
2723 
2724     for (i = 0; i < machine->ram_slots; i++) {
2725         spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_PMEM, i);
2726     }
2727 }
2728 
2729 /* pSeries LPAR / sPAPR hardware init */
2730 static void spapr_machine_init(MachineState *machine)
2731 {
2732     SpaprMachineState *spapr = SPAPR_MACHINE(machine);
2733     SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);
2734     MachineClass *mc = MACHINE_GET_CLASS(machine);
2735     const char *bios_default = spapr->vof ? FW_FILE_NAME_VOF : FW_FILE_NAME;
2736     const char *bios_name = machine->firmware ?: bios_default;
2737     g_autofree char *filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
2738     const char *kernel_filename = machine->kernel_filename;
2739     const char *initrd_filename = machine->initrd_filename;
2740     PCIHostState *phb;
2741     bool has_vga;
2742     int i;
2743     MemoryRegion *sysmem = get_system_memory();
2744     long load_limit, fw_size;
2745     Error *resize_hpt_err = NULL;
2746 
2747     if (!filename) {
2748         error_report("Could not find LPAR firmware '%s'", bios_name);
2749         exit(1);
2750     }
2751     fw_size = load_image_targphys(filename, 0, FW_MAX_SIZE);
2752     if (fw_size <= 0) {
2753         error_report("Could not load LPAR firmware '%s'", filename);
2754         exit(1);
2755     }
2756 
2757     /*
2758      * If Secure VM (PEF) support is configured, initialize it.
2759      */
2760     pef_kvm_init(machine->cgs, &error_fatal);
2761 
2762     msi_nonbroken = true;
2763 
2764     QLIST_INIT(&spapr->phbs);
2765     QTAILQ_INIT(&spapr->pending_dimm_unplugs);
2766 
2767     /* Determine capabilities to run with */
2768     spapr_caps_init(spapr);
2769 
2770     kvmppc_check_papr_resize_hpt(&resize_hpt_err);
2771     if (spapr->resize_hpt == SPAPR_RESIZE_HPT_DEFAULT) {
2772         /*
2773          * If the user explicitly requested a mode we should either
2774          * supply it, or fail completely (which we do below).  But if
2775          * it's not set explicitly, we reset our mode to something
2776          * that works
2777          */
2778         if (resize_hpt_err) {
2779             spapr->resize_hpt = SPAPR_RESIZE_HPT_DISABLED;
2780             error_free(resize_hpt_err);
2781             resize_hpt_err = NULL;
2782         } else {
2783             spapr->resize_hpt = smc->resize_hpt_default;
2784         }
2785     }
2786 
2787     assert(spapr->resize_hpt != SPAPR_RESIZE_HPT_DEFAULT);
2788 
2789     if ((spapr->resize_hpt != SPAPR_RESIZE_HPT_DISABLED) && resize_hpt_err) {
2790         /*
2791          * User requested HPT resize, but this host can't supply it.  Bail out
2792          */
2793         error_report_err(resize_hpt_err);
2794         exit(1);
2795     }
2796     error_free(resize_hpt_err);
2797 
2798     spapr->rma_size = spapr_rma_size(spapr, &error_fatal);
2799 
2800     /* Setup a load limit for the ramdisk leaving room for SLOF and FDT */
2801     load_limit = MIN(spapr->rma_size, FDT_MAX_ADDR) - FW_OVERHEAD;
2802 
2803     /*
2804      * VSMT must be set in order to be able to compute VCPU ids, i.e. to
2805      * call spapr_max_server_number() or spapr_vcpu_id().
2806      */
2807     spapr_set_vsmt_mode(spapr, &error_fatal);
2808 
2809     /* Set up Interrupt Controller before we create the VCPUs */
2810     spapr_irq_init(spapr, &error_fatal);
2811 
2812     /* Set up containers for ibm,client-architecture-support negotiated options
2813      */
2814     spapr->ov5 = spapr_ovec_new();
2815     spapr->ov5_cas = spapr_ovec_new();
2816 
2817     if (smc->dr_lmb_enabled) {
2818         spapr_ovec_set(spapr->ov5, OV5_DRCONF_MEMORY);
2819         spapr_validate_node_memory(machine, &error_fatal);
2820     }
2821 
2822     spapr_ovec_set(spapr->ov5, OV5_FORM1_AFFINITY);
2823 
2824     /* Do not advertise FORM2 NUMA support for pseries-6.1 and older */
2825     if (!smc->pre_6_2_numa_affinity) {
2826         spapr_ovec_set(spapr->ov5, OV5_FORM2_AFFINITY);
2827     }
2828 
2829     /* advertise support for dedicated HP event source to guests */
2830     if (spapr->use_hotplug_event_source) {
2831         spapr_ovec_set(spapr->ov5, OV5_HP_EVT);
2832     }
2833 
2834     /* advertise support for HPT resizing */
2835     if (spapr->resize_hpt != SPAPR_RESIZE_HPT_DISABLED) {
2836         spapr_ovec_set(spapr->ov5, OV5_HPT_RESIZE);
2837     }
2838 
2839     /* advertise support for ibm,dynamic-memory-v2 */
2840     spapr_ovec_set(spapr->ov5, OV5_DRMEM_V2);
2841 
2842     /* advertise XIVE on POWER9 machines */
2843     if (spapr->irq->xive) {
2844         spapr_ovec_set(spapr->ov5, OV5_XIVE_EXPLOIT);
2845     }
2846 
2847     /* init CPUs */
2848     spapr_init_cpus(spapr);
2849 
2850     spapr->gpu_numa_id = spapr_numa_initial_nvgpu_numa_id(machine);
2851 
2852     /* Init numa_assoc_array */
2853     spapr_numa_associativity_init(spapr, machine);
2854 
2855     if ((!kvm_enabled() || kvmppc_has_cap_mmu_radix()) &&
2856         ppc_type_check_compat(machine->cpu_type, CPU_POWERPC_LOGICAL_3_00, 0,
2857                               spapr->max_compat_pvr)) {
2858         spapr_ovec_set(spapr->ov5, OV5_MMU_RADIX_300);
2859         /* KVM and TCG always allow GTSE with radix... */
2860         spapr_ovec_set(spapr->ov5, OV5_MMU_RADIX_GTSE);
2861     }
2862     /* ... but not with hash (currently). */
2863 
2864     if (kvm_enabled()) {
2865         /* Enable H_LOGICAL_CI_* so SLOF can talk to in-kernel devices */
2866         kvmppc_enable_logical_ci_hcalls();
2867         kvmppc_enable_set_mode_hcall();
2868 
2869         /* H_CLEAR_MOD/_REF are mandatory in PAPR, but off by default */
2870         kvmppc_enable_clear_ref_mod_hcalls();
2871 
2872         /* Enable H_PAGE_INIT */
2873         kvmppc_enable_h_page_init();
2874     }
2875 
2876     /* map RAM */
2877     memory_region_add_subregion(sysmem, 0, machine->ram);
2878 
2879     /* always allocate the device memory information */
2880     machine->device_memory = g_malloc0(sizeof(*machine->device_memory));
2881 
2882     /* initialize hotplug memory address space */
2883     if (machine->ram_size < machine->maxram_size) {
2884         ram_addr_t device_mem_size = machine->maxram_size - machine->ram_size;
2885         /*
2886          * Limit the number of hotpluggable memory slots to half the number
2887          * of slots that KVM supports, leaving the other half for PCI and other
2888          * devices. However ensure that number of slots doesn't drop below 32.
2889          */
2890         int max_memslots = kvm_enabled() ? kvm_get_max_memslots() / 2 :
2891                            SPAPR_MAX_RAM_SLOTS;
2892 
2893         if (max_memslots < SPAPR_MAX_RAM_SLOTS) {
2894             max_memslots = SPAPR_MAX_RAM_SLOTS;
2895         }
2896         if (machine->ram_slots > max_memslots) {
2897             error_report("Specified number of memory slots %"
2898                          PRIu64" exceeds max supported %d",
2899                          machine->ram_slots, max_memslots);
2900             exit(1);
2901         }
2902 
2903         machine->device_memory->base = ROUND_UP(machine->ram_size,
2904                                                 SPAPR_DEVICE_MEM_ALIGN);
2905         memory_region_init(&machine->device_memory->mr, OBJECT(spapr),
2906                            "device-memory", device_mem_size);
2907         memory_region_add_subregion(sysmem, machine->device_memory->base,
2908                                     &machine->device_memory->mr);
2909     }
2910 
2911     if (smc->dr_lmb_enabled) {
2912         spapr_create_lmb_dr_connectors(spapr);
2913     }
2914 
2915     if (spapr_get_cap(spapr, SPAPR_CAP_FWNMI) == SPAPR_CAP_ON) {
2916         /* Create the error string for live migration blocker */
2917         error_setg(&spapr->fwnmi_migration_blocker,
2918             "A machine check is being handled during migration. The handler "
2919             "may run and log a hardware error on the destination");
2920     }
2921 
2922     if (mc->nvdimm_supported) {
2923         spapr_create_nvdimm_dr_connectors(spapr);
2924     }
2925 
2926     /* Set up RTAS event infrastructure */
2927     spapr_events_init(spapr);
2928 
2929     /* Set up the RTC RTAS interfaces */
2930     spapr_rtc_create(spapr);
2931 
2932     /* Set up VIO bus */
2933     spapr->vio_bus = spapr_vio_bus_init();
2934 
2935     for (i = 0; serial_hd(i); i++) {
2936         spapr_vty_create(spapr->vio_bus, serial_hd(i));
2937     }
2938 
2939     /* We always have at least the nvram device on VIO */
2940     spapr_create_nvram(spapr);
2941 
2942     /*
2943      * Set up hotplug / dynamic-reconfiguration connectors. Top-level
2944      * connectors (described in the root DT node's "ibm,drc-types" property)
2945      * are pre-initialized here. Additional child connectors (such as
2946      * connectors for a PHB's PCI slots) are added as needed during their
2947      * parent's realization.
2948      */
2949     if (smc->dr_phb_enabled) {
2950         for (i = 0; i < SPAPR_MAX_PHBS; i++) {
2951             spapr_dr_connector_new(OBJECT(machine), TYPE_SPAPR_DRC_PHB, i);
2952         }
2953     }
2954 
2955     /* Set up PCI */
2956     spapr_pci_rtas_init();
2957 
2958     phb = spapr_create_default_phb();
2959 
2960     for (i = 0; i < nb_nics; i++) {
2961         NICInfo *nd = &nd_table[i];
2962 
2963         if (!nd->model) {
2964             nd->model = g_strdup("spapr-vlan");
2965         }
2966 
2967         if (g_str_equal(nd->model, "spapr-vlan") ||
2968             g_str_equal(nd->model, "ibmveth")) {
2969             spapr_vlan_create(spapr->vio_bus, nd);
2970         } else {
2971             pci_nic_init_nofail(&nd_table[i], phb->bus, nd->model, NULL);
2972         }
2973     }
2974 
2975     for (i = 0; i <= drive_get_max_bus(IF_SCSI); i++) {
2976         spapr_vscsi_create(spapr->vio_bus);
2977     }
2978 
2979     /* Graphics */
2980     has_vga = spapr_vga_init(phb->bus, &error_fatal);
2981     if (has_vga) {
2982         spapr->want_stdout_path = !machine->enable_graphics;
2983         machine->usb |= defaults_enabled() && !machine->usb_disabled;
2984     } else {
2985         spapr->want_stdout_path = true;
2986     }
2987 
2988     if (machine->usb) {
2989         if (smc->use_ohci_by_default) {
2990             pci_create_simple(phb->bus, -1, "pci-ohci");
2991         } else {
2992             pci_create_simple(phb->bus, -1, "nec-usb-xhci");
2993         }
2994 
2995         if (has_vga) {
2996             USBBus *usb_bus = usb_bus_find(-1);
2997 
2998             usb_create_simple(usb_bus, "usb-kbd");
2999             usb_create_simple(usb_bus, "usb-mouse");
3000         }
3001     }
3002 
3003     if (kernel_filename) {
3004         uint64_t loaded_addr = 0;
3005 
3006         spapr->kernel_size = load_elf(kernel_filename, NULL,
3007                                       translate_kernel_address, spapr,
3008                                       NULL, &loaded_addr, NULL, NULL, 1,
3009                                       PPC_ELF_MACHINE, 0, 0);
3010         if (spapr->kernel_size == ELF_LOAD_WRONG_ENDIAN) {
3011             spapr->kernel_size = load_elf(kernel_filename, NULL,
3012                                           translate_kernel_address, spapr,
3013                                           NULL, &loaded_addr, NULL, NULL, 0,
3014                                           PPC_ELF_MACHINE, 0, 0);
3015             spapr->kernel_le = spapr->kernel_size > 0;
3016         }
3017         if (spapr->kernel_size < 0) {
3018             error_report("error loading %s: %s", kernel_filename,
3019                          load_elf_strerror(spapr->kernel_size));
3020             exit(1);
3021         }
3022 
3023         if (spapr->kernel_addr != loaded_addr) {
3024             warn_report("spapr: kernel_addr changed from 0x%"PRIx64
3025                         " to 0x%"PRIx64,
3026                         spapr->kernel_addr, loaded_addr);
3027             spapr->kernel_addr = loaded_addr;
3028         }
3029 
3030         /* load initrd */
3031         if (initrd_filename) {
3032             /* Try to locate the initrd in the gap between the kernel
3033              * and the firmware. Add a bit of space just in case
3034              */
3035             spapr->initrd_base = (spapr->kernel_addr + spapr->kernel_size
3036                                   + 0x1ffff) & ~0xffff;
3037             spapr->initrd_size = load_image_targphys(initrd_filename,
3038                                                      spapr->initrd_base,
3039                                                      load_limit
3040                                                      - spapr->initrd_base);
3041             if (spapr->initrd_size < 0) {
3042                 error_report("could not load initial ram disk '%s'",
3043                              initrd_filename);
3044                 exit(1);
3045             }
3046         }
3047     }
3048 
3049     /* FIXME: Should register things through the MachineState's qdev
3050      * interface; this is a legacy of the sPAPREnvironment structure,
3051      * which predated MachineState but had a similar function */
3052     vmstate_register(NULL, 0, &vmstate_spapr, spapr);
3053     register_savevm_live("spapr/htab", VMSTATE_INSTANCE_ID_ANY, 1,
3054                          &savevm_htab_handlers, spapr);
3055 
3056     qbus_set_hotplug_handler(sysbus_get_default(), OBJECT(machine));
3057 
3058     qemu_register_boot_set(spapr_boot_set, spapr);
3059 
3060     /*
3061      * Nothing needs to be done to resume a suspended guest because
3062      * suspending does not change the machine state, so no need for
3063      * a ->wakeup method.
3064      */
3065     qemu_register_wakeup_support();
3066 
3067     if (kvm_enabled()) {
3068         /* to stop and start vmclock */
3069         qemu_add_vm_change_state_handler(cpu_ppc_clock_vm_state_change,
3070                                          &spapr->tb);
3071 
3072         kvmppc_spapr_enable_inkernel_multitce();
3073     }
3074 
3075     qemu_cond_init(&spapr->fwnmi_machine_check_interlock_cond);
3076     if (spapr->vof) {
3077         spapr->vof->fw_size = fw_size; /* for claim() on itself */
3078         spapr_register_hypercall(KVMPPC_H_VOF_CLIENT, spapr_h_vof_client);
3079     }
3080 
3081     spapr_watchdog_init(spapr);
3082 }
3083 
3084 #define DEFAULT_KVM_TYPE "auto"
3085 static int spapr_kvm_type(MachineState *machine, const char *vm_type)
3086 {
3087     /*
3088      * The use of g_ascii_strcasecmp() for 'hv' and 'pr' is to
3089      * accommodate the 'HV' and 'PR' formats that exist in the
3090      * wild. The 'auto' mode is introduced as lower-case from the
3091      * start, so we don't need to bother checking for
3092      * "AUTO".
3093      */
3094     if (!vm_type || !strcmp(vm_type, DEFAULT_KVM_TYPE)) {
3095         return 0;
3096     }
3097 
3098     if (!g_ascii_strcasecmp(vm_type, "hv")) {
3099         return 1;
3100     }
3101 
3102     if (!g_ascii_strcasecmp(vm_type, "pr")) {
3103         return 2;
3104     }
3105 
3106     error_report("Unknown kvm-type specified '%s'", vm_type);
3107     exit(1);
3108 }
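/*
 * Illustrative usage (hypothetical invocation): the value returned above
 * is what QEMU hands to KVM as the VM type when the VM is created, so on
 * a POWER host
 *     qemu-system-ppc64 -machine pseries,accel=kvm,kvm-type=hv
 * requests the kvm_hv module, while kvm-type=pr requests kvm_pr.
 */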
3109 
3110 /*
3111  * Implementation of an interface to adjust firmware path
3112  * for the bootindex property handling.
3113  */
3114 static char *spapr_get_fw_dev_path(FWPathProvider *p, BusState *bus,
3115                                    DeviceState *dev)
3116 {
3117 #define CAST(type, obj, name) \
3118     ((type *)object_dynamic_cast(OBJECT(obj), (name)))
3119     SCSIDevice *d = CAST(SCSIDevice,  dev, TYPE_SCSI_DEVICE);
3120     SpaprPhbState *phb = CAST(SpaprPhbState, dev, TYPE_SPAPR_PCI_HOST_BRIDGE);
3121     VHostSCSICommon *vsc = CAST(VHostSCSICommon, dev, TYPE_VHOST_SCSI_COMMON);
3122     PCIDevice *pcidev = CAST(PCIDevice, dev, TYPE_PCI_DEVICE);
3123 
3124     if (d && bus) {
3125         void *spapr = CAST(void, bus->parent, "spapr-vscsi");
3126         VirtIOSCSI *virtio = CAST(VirtIOSCSI, bus->parent, TYPE_VIRTIO_SCSI);
3127         USBDevice *usb = CAST(USBDevice, bus->parent, TYPE_USB_DEVICE);
3128 
3129         if (spapr) {
3130             /*
3131              * Replace "channel@0/disk@0,0" with "disk@8000000000000000":
3132              * In the top 16 bits of the 64-bit LUN, we use SRP luns of the form
3133              * 0x8000 | (target << 8) | (bus << 5) | lun
3134              * (see the "Logical unit addressing format" table in SAM5)
3135              */
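            /*
             * Worked example (illustrative arithmetic only): target 1,
             * channel 0, lun 0 gives 0x8000 | (1 << 8) = 0x8100, which
             * the shift below turns into "disk@8100000000000000".
             */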
3136             unsigned id = 0x8000 | (d->id << 8) | (d->channel << 5) | d->lun;
3137             return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
3138                                    (uint64_t)id << 48);
3139         } else if (virtio) {
3140             /*
3141              * We use SRP luns of the form 01000000 | (target << 8) | lun
3142              * in the top 32 bits of the 64-bit LUN
3143              * Note: the comment above is quoted from SLOF and is wrong;
3144              * the actual binding is:
3145              * swap 0100 or 10 << or 20 << ( target lun-id -- srplun )
3146              */
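            /*
             * Worked example (illustrative arithmetic only): target 2,
             * lun 0 gives 0x1000000 | (2 << 16) = 0x1020000, hence
             * "disk@102000000000000" after the 32-bit shift; luns of
             * 256 and up additionally get the 0x4000 flat-space bit.
             */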
3147             unsigned id = 0x1000000 | (d->id << 16) | d->lun;
3148             if (d->lun >= 256) {
3149                 /* Use the LUN "flat space addressing method" */
3150                 id |= 0x4000;
3151             }
3152             return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
3153                                    (uint64_t)id << 32);
3154         } else if (usb) {
3155             /*
3156              * We use SRP luns of the form 01000000 | (usb-port << 16) | lun
3157              * in the top 32 bits of the 64-bit LUN
3158              */
3159             unsigned usb_port = atoi(usb->port->path);
3160             unsigned id = 0x1000000 | (usb_port << 16) | d->lun;
3161             return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
3162                                    (uint64_t)id << 32);
3163         }
3164     }
3165 
3166     /*
3167      * SLOF probes the USB devices, and if it recognizes that the device is a
3168      * storage device, it changes its name to "storage" instead of "usb-host",
3169      * and additionally adds a child node for the SCSI LUN, so the correct
3170      * boot path in SLOF is something like ".../storage@1/disk@xxx" instead.
3171      */
3172     if (strcmp("usb-host", qdev_fw_name(dev)) == 0) {
3173         USBDevice *usbdev = CAST(USBDevice, dev, TYPE_USB_DEVICE);
3174         if (usb_device_is_scsi_storage(usbdev)) {
3175             return g_strdup_printf("storage@%s/disk", usbdev->port->path);
3176         }
3177     }
3178 
3179     if (phb) {
3180         /* Replace "pci" with "pci@800000020000000" */
3181         return g_strdup_printf("pci@%"PRIX64, phb->buid);
3182     }
3183 
3184     if (vsc) {
3185         /* Same logic as virtio above */
3186         unsigned id = 0x1000000 | (vsc->target << 16) | vsc->lun;
3187         return g_strdup_printf("disk@%"PRIX64, (uint64_t)id << 32);
3188     }
3189 
3190     if (g_str_equal("pci-bridge", qdev_fw_name(dev))) {
3191         /* SLOF uses "pci" instead of "pci-bridge" for PCI bridges */
3192         PCIDevice *pcidev = CAST(PCIDevice, dev, TYPE_PCI_DEVICE);
3193         return g_strdup_printf("pci@%x", PCI_SLOT(pcidev->devfn));
3194     }
3195 
3196     if (pcidev) {
3197         return spapr_pci_fw_dev_name(pcidev);
3198     }
3199 
3200     return NULL;
3201 }
3202 
3203 static char *spapr_get_kvm_type(Object *obj, Error **errp)
3204 {
3205     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3206 
3207     return g_strdup(spapr->kvm_type);
3208 }
3209 
3210 static void spapr_set_kvm_type(Object *obj, const char *value, Error **errp)
3211 {
3212     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3213 
3214     g_free(spapr->kvm_type);
3215     spapr->kvm_type = g_strdup(value);
3216 }
3217 
3218 static bool spapr_get_modern_hotplug_events(Object *obj, Error **errp)
3219 {
3220     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3221 
3222     return spapr->use_hotplug_event_source;
3223 }
3224 
3225 static void spapr_set_modern_hotplug_events(Object *obj, bool value,
3226                                             Error **errp)
3227 {
3228     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3229 
3230     spapr->use_hotplug_event_source = value;
3231 }
3232 
3233 static bool spapr_get_msix_emulation(Object *obj, Error **errp)
3234 {
3235     return true;
3236 }
3237 
3238 static char *spapr_get_resize_hpt(Object *obj, Error **errp)
3239 {
3240     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3241 
3242     switch (spapr->resize_hpt) {
3243     case SPAPR_RESIZE_HPT_DEFAULT:
3244         return g_strdup("default");
3245     case SPAPR_RESIZE_HPT_DISABLED:
3246         return g_strdup("disabled");
3247     case SPAPR_RESIZE_HPT_ENABLED:
3248         return g_strdup("enabled");
3249     case SPAPR_RESIZE_HPT_REQUIRED:
3250         return g_strdup("required");
3251     }
3252     g_assert_not_reached();
3253 }
3254 
3255 static void spapr_set_resize_hpt(Object *obj, const char *value, Error **errp)
3256 {
3257     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3258 
3259     if (strcmp(value, "default") == 0) {
3260         spapr->resize_hpt = SPAPR_RESIZE_HPT_DEFAULT;
3261     } else if (strcmp(value, "disabled") == 0) {
3262         spapr->resize_hpt = SPAPR_RESIZE_HPT_DISABLED;
3263     } else if (strcmp(value, "enabled") == 0) {
3264         spapr->resize_hpt = SPAPR_RESIZE_HPT_ENABLED;
3265     } else if (strcmp(value, "required") == 0) {
3266         spapr->resize_hpt = SPAPR_RESIZE_HPT_REQUIRED;
3267     } else {
3268         error_setg(errp, "Bad value for \"resize-hpt\" property");
3269     }
3270 }
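/*
 * Illustrative usage (hypothetical invocation):
 *     qemu-system-ppc64 -machine pseries,resize-hpt=disabled
 * fixes the Hash Page Table size at boot, while "required" insists on
 * HPT-resizing support being available.
 */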
3271 
3272 static bool spapr_get_vof(Object *obj, Error **errp)
3273 {
3274     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3275 
3276     return spapr->vof != NULL;
3277 }
3278 
3279 static void spapr_set_vof(Object *obj, bool value, Error **errp)
3280 {
3281     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3282 
3283     if (spapr->vof) {
3284         vof_cleanup(spapr->vof);
3285         g_free(spapr->vof);
3286         spapr->vof = NULL;
3287     }
3288     if (!value) {
3289         return;
3290     }
3291     spapr->vof = g_malloc0(sizeof(*spapr->vof));
3292 }
3293 
3294 static char *spapr_get_ic_mode(Object *obj, Error **errp)
3295 {
3296     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3297 
3298     if (spapr->irq == &spapr_irq_xics_legacy) {
3299         return g_strdup("legacy");
3300     } else if (spapr->irq == &spapr_irq_xics) {
3301         return g_strdup("xics");
3302     } else if (spapr->irq == &spapr_irq_xive) {
3303         return g_strdup("xive");
3304     } else if (spapr->irq == &spapr_irq_dual) {
3305         return g_strdup("dual");
3306     }
3307     g_assert_not_reached();
3308 }
3309 
3310 static void spapr_set_ic_mode(Object *obj, const char *value, Error **errp)
3311 {
3312     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3313 
3314     if (SPAPR_MACHINE_GET_CLASS(spapr)->legacy_irq_allocation) {
3315         error_setg(errp, "This machine only uses the legacy XICS backend, don't pass ic-mode");
3316         return;
3317     }
3318 
3319     /* The legacy IRQ backend cannot be set */
3320     if (strcmp(value, "xics") == 0) {
3321         spapr->irq = &spapr_irq_xics;
3322     } else if (strcmp(value, "xive") == 0) {
3323         spapr->irq = &spapr_irq_xive;
3324     } else if (strcmp(value, "dual") == 0) {
3325         spapr->irq = &spapr_irq_dual;
3326     } else {
3327         error_setg(errp, "Bad value for \"ic-mode\" property");
3328     }
3329 }
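/*
 * Illustrative usage (hypothetical invocation):
 *     qemu-system-ppc64 -machine pseries,ic-mode=xive
 * selects the XIVE backend only, while "dual" exposes both XICS and
 * XIVE and lets the guest pick one at CAS.
 */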
3330 
3331 static char *spapr_get_host_model(Object *obj, Error **errp)
3332 {
3333     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3334 
3335     return g_strdup(spapr->host_model);
3336 }
3337 
3338 static void spapr_set_host_model(Object *obj, const char *value, Error **errp)
3339 {
3340     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3341 
3342     g_free(spapr->host_model);
3343     spapr->host_model = g_strdup(value);
3344 }
3345 
3346 static char *spapr_get_host_serial(Object *obj, Error **errp)
3347 {
3348     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3349 
3350     return g_strdup(spapr->host_serial);
3351 }
3352 
3353 static void spapr_set_host_serial(Object *obj, const char *value, Error **errp)
3354 {
3355     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3356 
3357     g_free(spapr->host_serial);
3358     spapr->host_serial = g_strdup(value);
3359 }
3360 
3361 static void spapr_instance_init(Object *obj)
3362 {
3363     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3364     SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
3365     MachineState *ms = MACHINE(spapr);
3366     MachineClass *mc = MACHINE_GET_CLASS(ms);
3367 
3368     /*
3369      * NVDIMM support went live in 5.1 without considering that, in
3370      * other archs, the user needs to enable NVDIMM support with the
3371      * 'nvdimm' machine option and the default behavior is NVDIMM
3372      * support disabled. It is too late to roll back to the standard
3373      * behavior without breaking 5.1 guests.
3374      */
3375     if (mc->nvdimm_supported) {
3376         ms->nvdimms_state->is_enabled = true;
3377     }
3378 
3379     spapr->htab_fd = -1;
3380     spapr->use_hotplug_event_source = true;
3381     spapr->kvm_type = g_strdup(DEFAULT_KVM_TYPE);
3382     object_property_add_str(obj, "kvm-type",
3383                             spapr_get_kvm_type, spapr_set_kvm_type);
3384     object_property_set_description(obj, "kvm-type",
3385                                     "Specifies the KVM virtualization mode (auto,"
3386                                     " hv, pr). Defaults to 'auto'. This mode will use"
3387                                     " any available KVM module loaded in the host,"
3388                                     " where kvm_hv takes precedence if both kvm_hv and"
3389                                     " kvm_pr are loaded.");
3390     object_property_add_bool(obj, "modern-hotplug-events",
3391                             spapr_get_modern_hotplug_events,
3392                             spapr_set_modern_hotplug_events);
3393     object_property_set_description(obj, "modern-hotplug-events",
3394                                     "Use dedicated hotplug event mechanism in"
3395                                     " place of standard EPOW events when possible"
3396                                     " (required for memory hot-unplug support)");
3397     ppc_compat_add_property(obj, "max-cpu-compat", &spapr->max_compat_pvr,
3398                             "Maximum permitted CPU compatibility mode");
3399 
3400     object_property_add_str(obj, "resize-hpt",
3401                             spapr_get_resize_hpt, spapr_set_resize_hpt);
3402     object_property_set_description(obj, "resize-hpt",
3403                                     "Resizing of the Hash Page Table (default, enabled, disabled, required)");
3404     object_property_add_uint32_ptr(obj, "vsmt",
3405                                    &spapr->vsmt, OBJ_PROP_FLAG_READWRITE);
3406     object_property_set_description(obj, "vsmt",
3407                                     "Virtual SMT: KVM behaves as if this were"
3408                                     " the host's SMT mode");
3409 
3410     object_property_add_bool(obj, "vfio-no-msix-emulation",
3411                              spapr_get_msix_emulation, NULL);
3412 
3413     object_property_add_uint64_ptr(obj, "kernel-addr",
3414                                    &spapr->kernel_addr, OBJ_PROP_FLAG_READWRITE);
3415     object_property_set_description(obj, "kernel-addr",
3416                                     "Load address for a -kernel image;"
3417                                     " defaults to " stringify(KERNEL_LOAD_ADDR));
3418     spapr->kernel_addr = KERNEL_LOAD_ADDR;
3419 
3420     object_property_add_bool(obj, "x-vof", spapr_get_vof, spapr_set_vof);
3421     object_property_set_description(obj, "x-vof",
3422                                     "Enable Virtual Open Firmware (experimental)");
3423 
3424     /* The machine class defines the default interrupt controller mode */
3425     spapr->irq = smc->irq;
3426     object_property_add_str(obj, "ic-mode", spapr_get_ic_mode,
3427                             spapr_set_ic_mode);
3428     object_property_set_description(obj, "ic-mode",
3429                  "Specifies the interrupt controller mode (xics, xive, dual)");
3430 
3431     object_property_add_str(obj, "host-model",
3432         spapr_get_host_model, spapr_set_host_model);
3433     object_property_set_description(obj, "host-model",
3434         "Host model to advertise in guest device tree");
3435     object_property_add_str(obj, "host-serial",
3436         spapr_get_host_serial, spapr_set_host_serial);
3437     object_property_set_description(obj, "host-serial",
3438         "Host serial number to advertise in guest device tree");
3439 }
3440 
3441 static void spapr_machine_finalizefn(Object *obj)
3442 {
3443     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3444 
3445     g_free(spapr->kvm_type);
3446 }
3447 
3448 void spapr_do_system_reset_on_cpu(CPUState *cs, run_on_cpu_data arg)
3449 {
3450     SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
3451     PowerPCCPU *cpu = POWERPC_CPU(cs);
3452     CPUPPCState *env = &cpu->env;
3453 
3454     cpu_synchronize_state(cs);
3455     /* If FWNMI is inactive, addr will be -1, which will deliver to 0x100 */
3456     if (spapr->fwnmi_system_reset_addr != -1) {
3457         uint64_t rtas_addr, addr;
3458 
3459         /* get rtas addr from fdt */
3460         rtas_addr = spapr_get_rtas_addr();
3461         if (!rtas_addr) {
3462             qemu_system_guest_panicked(NULL);
3463             return;
3464         }
3465 
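        /*
         * Each CPU gets a two-uint64_t scratch area just past the RTAS
         * error log; the guest's GPR3 is saved there and GPR3 is then
         * pointed at it for the firmware-assisted reset handler.
         */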
3466         addr = rtas_addr + RTAS_ERROR_LOG_MAX + cs->cpu_index * sizeof(uint64_t)*2;
3467         stq_be_phys(&address_space_memory, addr, env->gpr[3]);
3468         stq_be_phys(&address_space_memory, addr + sizeof(uint64_t), 0);
3469         env->gpr[3] = addr;
3470     }
3471     ppc_cpu_do_system_reset(cs);
3472     if (spapr->fwnmi_system_reset_addr != -1) {
3473         env->nip = spapr->fwnmi_system_reset_addr;
3474     }
3475 }
3476 
3477 static void spapr_nmi(NMIState *n, int cpu_index, Error **errp)
3478 {
3479     CPUState *cs;
3480 
3481     CPU_FOREACH(cs) {
3482         async_run_on_cpu(cs, spapr_do_system_reset_on_cpu, RUN_ON_CPU_NULL);
3483     }
3484 }
3485 
3486 int spapr_lmb_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr,
3487                           void *fdt, int *fdt_start_offset, Error **errp)
3488 {
3489     uint64_t addr;
3490     uint32_t node;
3491 
3492     addr = spapr_drc_index(drc) * SPAPR_MEMORY_BLOCK_SIZE;
3493     node = object_property_get_uint(OBJECT(drc->dev), PC_DIMM_NODE_PROP,
3494                                     &error_abort);
3495     *fdt_start_offset = spapr_dt_memory_node(spapr, fdt, node, addr,
3496                                              SPAPR_MEMORY_BLOCK_SIZE);
3497     return 0;
3498 }
3499 
3500 static void spapr_add_lmbs(DeviceState *dev, uint64_t addr_start, uint64_t size,
3501                            bool dedicated_hp_event_source)
3502 {
3503     SpaprDrc *drc;
3504     uint32_t nr_lmbs = size/SPAPR_MEMORY_BLOCK_SIZE;
3505     int i;
3506     uint64_t addr = addr_start;
3507     bool hotplugged = spapr_drc_hotplugged(dev);
3508 
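    /*
     * Illustrative numbers (assuming the usual 256MiB
     * SPAPR_MEMORY_BLOCK_SIZE): a 1GiB DIMM spans four LMBs, and each
     * LMB's DRC id below is simply its address divided by the block
     * size.
     */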
3509     for (i = 0; i < nr_lmbs; i++) {
3510         drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
3511                               addr / SPAPR_MEMORY_BLOCK_SIZE);
3512         g_assert(drc);
3513 
3514         /*
3515          * memory_device_get_free_addr() provided a range of free addresses
3516          * that doesn't overlap with any existing mapping at pre-plug. The
3517          * corresponding LMB DRCs are thus assumed to be all attachable.
3518          */
3519         spapr_drc_attach(drc, dev);
3520         if (!hotplugged) {
3521             spapr_drc_reset(drc);
3522         }
3523         addr += SPAPR_MEMORY_BLOCK_SIZE;
3524     }
3525     /* Send a hotplug notification to the guest only in case
3526      * of hotplugged memory.
3527      */
3528     if (hotplugged) {
3529         if (dedicated_hp_event_source) {
3530             drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
3531                                   addr_start / SPAPR_MEMORY_BLOCK_SIZE);
3532             g_assert(drc);
3533             spapr_hotplug_req_add_by_count_indexed(SPAPR_DR_CONNECTOR_TYPE_LMB,
3534                                                    nr_lmbs,
3535                                                    spapr_drc_index(drc));
3536         } else {
3537             spapr_hotplug_req_add_by_count(SPAPR_DR_CONNECTOR_TYPE_LMB,
3538                                            nr_lmbs);
3539         }
3540     }
3541 }
3542 
3543 static void spapr_memory_plug(HotplugHandler *hotplug_dev, DeviceState *dev)
3544 {
3545     SpaprMachineState *ms = SPAPR_MACHINE(hotplug_dev);
3546     PCDIMMDevice *dimm = PC_DIMM(dev);
3547     uint64_t size, addr;
3548     int64_t slot;
3549     bool is_nvdimm = object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM);
3550 
3551     size = memory_device_get_region_size(MEMORY_DEVICE(dev), &error_abort);
3552 
3553     pc_dimm_plug(dimm, MACHINE(ms));
3554 
3555     if (!is_nvdimm) {
3556         addr = object_property_get_uint(OBJECT(dimm),
3557                                         PC_DIMM_ADDR_PROP, &error_abort);
3558         spapr_add_lmbs(dev, addr, size,
3559                        spapr_ovec_test(ms->ov5_cas, OV5_HP_EVT));
3560     } else {
3561         slot = object_property_get_int(OBJECT(dimm),
3562                                        PC_DIMM_SLOT_PROP, &error_abort);
3563         /* We should have a valid slot number at this point */
3564         g_assert(slot >= 0);
3565         spapr_add_nvdimm(dev, slot);
3566     }
3567 }
3568 
3569 static void spapr_memory_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
3570                                   Error **errp)
3571 {
3572     const SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(hotplug_dev);
3573     SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_dev);
3574     bool is_nvdimm = object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM);
3575     PCDIMMDevice *dimm = PC_DIMM(dev);
3576     Error *local_err = NULL;
3577     uint64_t size;
3578     Object *memdev;
3579     hwaddr pagesize;
3580 
3581     if (!smc->dr_lmb_enabled) {
3582         error_setg(errp, "Memory hotplug not supported for this machine");
3583         return;
3584     }
3585 
3586     size = memory_device_get_region_size(MEMORY_DEVICE(dimm), &local_err);
3587     if (local_err) {
3588         error_propagate(errp, local_err);
3589         return;
3590     }
3591 
3592     if (is_nvdimm) {
3593         if (!spapr_nvdimm_validate(hotplug_dev, NVDIMM(dev), size, errp)) {
3594             return;
3595         }
3596     } else if (size % SPAPR_MEMORY_BLOCK_SIZE) {
3597         error_setg(errp, "Hotplugged memory size must be a multiple of "
3598                    "%" PRIu64 " MiB", SPAPR_MEMORY_BLOCK_SIZE / MiB);
3599         return;
3600     }
3601 
3602     memdev = object_property_get_link(OBJECT(dimm), PC_DIMM_MEMDEV_PROP,
3603                                       &error_abort);
3604     pagesize = host_memory_backend_pagesize(MEMORY_BACKEND(memdev));
3605     if (!spapr_check_pagesize(spapr, pagesize, errp)) {
3606         return;
3607     }
3608 
3609     pc_dimm_pre_plug(dimm, MACHINE(hotplug_dev), NULL, errp);
3610 }
3611 
3612 struct SpaprDimmState {
3613     PCDIMMDevice *dimm;
3614     uint32_t nr_lmbs;
3615     QTAILQ_ENTRY(SpaprDimmState) next;
3616 };
3617 
3618 static SpaprDimmState *spapr_pending_dimm_unplugs_find(SpaprMachineState *s,
3619                                                        PCDIMMDevice *dimm)
3620 {
3621     SpaprDimmState *dimm_state = NULL;
3622 
3623     QTAILQ_FOREACH(dimm_state, &s->pending_dimm_unplugs, next) {
3624         if (dimm_state->dimm == dimm) {
3625             break;
3626         }
3627     }
3628     return dimm_state;
3629 }
3630 
3631 static SpaprDimmState *spapr_pending_dimm_unplugs_add(SpaprMachineState *spapr,
3632                                                       uint32_t nr_lmbs,
3633                                                       PCDIMMDevice *dimm)
3634 {
3635     SpaprDimmState *ds = NULL;
3636 
3637     /*
3638      * If this request is for a DIMM whose removal had failed earlier
3639      * (due to guest's refusal to remove the LMBs), we would have this
3640      * dimm already in the pending_dimm_unplugs list. In that
3641      * case don't add again.
3642      */
3643     ds = spapr_pending_dimm_unplugs_find(spapr, dimm);
3644     if (!ds) {
3645         ds = g_new0(SpaprDimmState, 1);
3646         ds->nr_lmbs = nr_lmbs;
3647         ds->dimm = dimm;
3648         QTAILQ_INSERT_HEAD(&spapr->pending_dimm_unplugs, ds, next);
3649     }
3650     return ds;
3651 }
3652 
3653 static void spapr_pending_dimm_unplugs_remove(SpaprMachineState *spapr,
3654                                               SpaprDimmState *dimm_state)
3655 {
3656     QTAILQ_REMOVE(&spapr->pending_dimm_unplugs, dimm_state, next);
3657     g_free(dimm_state);
3658 }
3659 
3660 static SpaprDimmState *spapr_recover_pending_dimm_state(SpaprMachineState *ms,
3661                                                         PCDIMMDevice *dimm)
3662 {
3663     SpaprDrc *drc;
3664     uint64_t size = memory_device_get_region_size(MEMORY_DEVICE(dimm),
3665                                                   &error_abort);
3666     uint32_t nr_lmbs = size / SPAPR_MEMORY_BLOCK_SIZE;
3667     uint32_t avail_lmbs = 0;
3668     uint64_t addr_start, addr;
3669     int i;
3670 
3671     addr_start = object_property_get_uint(OBJECT(dimm), PC_DIMM_ADDR_PROP,
3672                                           &error_abort);
3673 
3674     addr = addr_start;
3675     for (i = 0; i < nr_lmbs; i++) {
3676         drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
3677                               addr / SPAPR_MEMORY_BLOCK_SIZE);
3678         g_assert(drc);
3679         if (drc->dev) {
3680             avail_lmbs++;
3681         }
3682         addr += SPAPR_MEMORY_BLOCK_SIZE;
3683     }
3684 
3685     return spapr_pending_dimm_unplugs_add(ms, avail_lmbs, dimm);
3686 }
3687 
3688 void spapr_memory_unplug_rollback(SpaprMachineState *spapr, DeviceState *dev)
3689 {
3690     SpaprDimmState *ds;
3691     PCDIMMDevice *dimm;
3692     SpaprDrc *drc;
3693     uint32_t nr_lmbs;
3694     uint64_t size, addr_start, addr;
3695     g_autofree char *qapi_error = NULL;
3696     int i;
3697 
3698     if (!dev) {
3699         return;
3700     }
3701 
3702     dimm = PC_DIMM(dev);
3703     ds = spapr_pending_dimm_unplugs_find(spapr, dimm);
3704 
3705     /*
3706      * 'ds == NULL' would mean that the DIMM doesn't have a pending
3707      * unplug state, but one of its DRCs is marked as unplug_requested.
3708      * This is bad and weird enough to g_assert() out.
3709      */
3710     g_assert(ds);
3711 
3712     spapr_pending_dimm_unplugs_remove(spapr, ds);
3713 
3714     size = memory_device_get_region_size(MEMORY_DEVICE(dimm), &error_abort);
3715     nr_lmbs = size / SPAPR_MEMORY_BLOCK_SIZE;
3716 
3717     addr_start = object_property_get_uint(OBJECT(dimm), PC_DIMM_ADDR_PROP,
3718                                           &error_abort);
3719 
3720     addr = addr_start;
3721     for (i = 0; i < nr_lmbs; i++) {
3722         drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
3723                               addr / SPAPR_MEMORY_BLOCK_SIZE);
3724         g_assert(drc);
3725 
3726         drc->unplug_requested = false;
3727         addr += SPAPR_MEMORY_BLOCK_SIZE;
3728     }
3729 
3730     /*
3731      * Tell QAPI that something happened and the memory
3732      * hotunplug wasn't successful. Keep sending
3733      * MEM_UNPLUG_ERROR alongside DEVICE_UNPLUG_GUEST_ERROR
3734      * until MEM_UNPLUG_ERROR's deprecation period has
3735      * elapsed.
3736      */
3737     qapi_error = g_strdup_printf("Memory hotunplug rejected by the guest "
3738                                  "for device %s", dev->id);
3739 
3740     qapi_event_send_mem_unplug_error(dev->id ? : "", qapi_error);
3741 
3742     qapi_event_send_device_unplug_guest_error(dev->id,
3743                                               dev->canonical_path);
3744 }
3745 
3746 /* Callback to be called during DRC release. */
3747 void spapr_lmb_release(DeviceState *dev)
3748 {
3749     HotplugHandler *hotplug_ctrl = qdev_get_hotplug_handler(dev);
3750     SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_ctrl);
3751     SpaprDimmState *ds = spapr_pending_dimm_unplugs_find(spapr, PC_DIMM(dev));
3752 
3753     /* This information will get lost if a migration occurs
3754      * during the unplug process. In that case, recover it. */
3755     if (ds == NULL) {
3756         ds = spapr_recover_pending_dimm_state(spapr, PC_DIMM(dev));
3757         g_assert(ds);
3758         /* The DRC being examined by the caller at least must be counted */
3759         g_assert(ds->nr_lmbs);
3760     }
3761 
3762     if (--ds->nr_lmbs) {
3763         return;
3764     }
3765 
3766     /*
3767      * Now that all the LMBs have been removed by the guest, call the
3768      * unplug handler chain. This can never fail.
3769      */
3770     hotplug_handler_unplug(hotplug_ctrl, dev, &error_abort);
3771     object_unparent(OBJECT(dev));
3772 }
3773 
3774 static void spapr_memory_unplug(HotplugHandler *hotplug_dev, DeviceState *dev)
3775 {
3776     SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_dev);
3777     SpaprDimmState *ds = spapr_pending_dimm_unplugs_find(spapr, PC_DIMM(dev));
3778 
3779     /* We really shouldn't get this far without anything to unplug */
3780     g_assert(ds);
3781 
3782     pc_dimm_unplug(PC_DIMM(dev), MACHINE(hotplug_dev));
3783     qdev_unrealize(dev);
3784     spapr_pending_dimm_unplugs_remove(spapr, ds);
3785 }
3786 
3787 static void spapr_memory_unplug_request(HotplugHandler *hotplug_dev,
3788                                         DeviceState *dev, Error **errp)
3789 {
3790     SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_dev);
3791     PCDIMMDevice *dimm = PC_DIMM(dev);
3792     uint32_t nr_lmbs;
3793     uint64_t size, addr_start, addr;
3794     int i;
3795     SpaprDrc *drc;
3796 
3797     if (object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM)) {
3798         error_setg(errp, "nvdimm device hot unplug is not supported yet.");
3799         return;
3800     }
3801 
3802     size = memory_device_get_region_size(MEMORY_DEVICE(dimm), &error_abort);
3803     nr_lmbs = size / SPAPR_MEMORY_BLOCK_SIZE;
3804 
3805     addr_start = object_property_get_uint(OBJECT(dimm), PC_DIMM_ADDR_PROP,
3806                                           &error_abort);
3807 
3808     /*
3809      * An existing pending dimm state for this DIMM means that there is an
3810      * unplug operation in progress, waiting for the spapr_lmb_release
3811      * callback to complete the job (BQL can't cover that far). In this case,
3812      * bail out to avoid detaching DRCs that were already released.
3813      */
3814     if (spapr_pending_dimm_unplugs_find(spapr, dimm)) {
3815         error_setg(errp, "Memory unplug already in progress for device %s",
3816                    dev->id);
3817         return;
3818     }
3819 
3820     spapr_pending_dimm_unplugs_add(spapr, nr_lmbs, dimm);
3821 
3822     addr = addr_start;
3823     for (i = 0; i < nr_lmbs; i++) {
3824         drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
3825                               addr / SPAPR_MEMORY_BLOCK_SIZE);
3826         g_assert(drc);
3827 
3828         spapr_drc_unplug_request(drc);
3829         addr += SPAPR_MEMORY_BLOCK_SIZE;
3830     }
3831 
3832     drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
3833                           addr_start / SPAPR_MEMORY_BLOCK_SIZE);
3834     spapr_hotplug_req_remove_by_count_indexed(SPAPR_DR_CONNECTOR_TYPE_LMB,
3835                                               nr_lmbs, spapr_drc_index(drc));
3836 }
3837 
3838 /* Callback to be called during DRC release. */
3839 void spapr_core_release(DeviceState *dev)
3840 {
3841     HotplugHandler *hotplug_ctrl = qdev_get_hotplug_handler(dev);
3842 
3843     /* Call the unplug handler chain. This can never fail. */
3844     hotplug_handler_unplug(hotplug_ctrl, dev, &error_abort);
3845     object_unparent(OBJECT(dev));
3846 }
3847 
3848 static void spapr_core_unplug(HotplugHandler *hotplug_dev, DeviceState *dev)
3849 {
3850     MachineState *ms = MACHINE(hotplug_dev);
3851     SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(ms);
3852     CPUCore *cc = CPU_CORE(dev);
3853     CPUArchId *core_slot = spapr_find_cpu_slot(ms, cc->core_id, NULL);
3854 
3855     if (smc->pre_2_10_has_unused_icps) {
3856         SpaprCpuCore *sc = SPAPR_CPU_CORE(OBJECT(dev));
3857         int i;
3858 
3859         for (i = 0; i < cc->nr_threads; i++) {
3860             CPUState *cs = CPU(sc->threads[i]);
3861 
3862             pre_2_10_vmstate_register_dummy_icp(cs->cpu_index);
3863         }
3864     }
3865 
3866     assert(core_slot);
3867     core_slot->cpu = NULL;
3868     qdev_unrealize(dev);
3869 }
3870 
3871 static
3872 void spapr_core_unplug_request(HotplugHandler *hotplug_dev, DeviceState *dev,
3873                                Error **errp)
3874 {
3875     SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
3876     int index;
3877     SpaprDrc *drc;
3878     CPUCore *cc = CPU_CORE(dev);
3879 
3880     if (!spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index)) {
3881         error_setg(errp, "Unable to find CPU core with core-id: %d",
3882                    cc->core_id);
3883         return;
3884     }
3885     if (index == 0) {
3886         error_setg(errp, "Boot CPU core may not be unplugged");
3887         return;
3888     }
3889 
3890     drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU,
3891                           spapr_vcpu_id(spapr, cc->core_id));
3892     g_assert(drc);
3893 
3894     if (!spapr_drc_unplug_requested(drc)) {
3895         spapr_drc_unplug_request(drc);
3896     }
3897 
3898     /*
3899      * spapr_hotplug_req_remove_by_index is deliberately left outside
3900      * the "!spapr_drc_unplug_requested" guard to allow multiple IRQ
3901      * pulses removing the same CPU. Otherwise, after a failed hotunplug
3902      * attempt (e.g. the kernel refusing to remove the last online
3903      * CPU), we would never attempt it again because unplug_requested
3904      * would still be 'true' in that case.
3905      */
3906     spapr_hotplug_req_remove_by_index(drc);
3907 }
3908 
3909 int spapr_core_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr,
3910                            void *fdt, int *fdt_start_offset, Error **errp)
3911 {
3912     SpaprCpuCore *core = SPAPR_CPU_CORE(drc->dev);
3913     CPUState *cs = CPU(core->threads[0]);
3914     PowerPCCPU *cpu = POWERPC_CPU(cs);
3915     DeviceClass *dc = DEVICE_GET_CLASS(cs);
3916     int id = spapr_get_vcpu_id(cpu);
3917     g_autofree char *nodename = NULL;
3918     int offset;
3919 
3920     nodename = g_strdup_printf("%s@%x", dc->fw_name, id);
3921     offset = fdt_add_subnode(fdt, 0, nodename);
3922 
3923     spapr_dt_cpu(cs, fdt, offset, spapr);
3924 
3925     /*
3926      * spapr_dt_cpu() does not fill the 'name' property in the
3927      * CPU node. The function is called during the boot process, before
3928      * and after CAS, and overwriting the 'name' property written
3929      * by SLOF is not allowed.
3930      *
3931      * Write it manually after spapr_dt_cpu(). This makes the hotplug
3932      * CPUs more compatible with the coldplugged ones, which have
3933      * the 'name' property. The Linux kernel also relies on this
3934      * property to identify CPU nodes.
3935      */
3936     _FDT((fdt_setprop_string(fdt, offset, "name", nodename)));
3937 
3938     *fdt_start_offset = offset;
3939     return 0;
3940 }
3941 
3942 static void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev)
3943 {
3944     SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
3945     MachineClass *mc = MACHINE_GET_CLASS(spapr);
3946     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
3947     SpaprCpuCore *core = SPAPR_CPU_CORE(OBJECT(dev));
3948     CPUCore *cc = CPU_CORE(dev);
3949     CPUState *cs;
3950     SpaprDrc *drc;
3951     CPUArchId *core_slot;
3952     int index;
3953     bool hotplugged = spapr_drc_hotplugged(dev);
3954     int i;
3955 
3956     core_slot = spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index);
3957     g_assert(core_slot); /* Already checked in spapr_core_pre_plug() */
3958 
3959     drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU,
3960                           spapr_vcpu_id(spapr, cc->core_id));
3961 
3962     g_assert(drc || !mc->has_hotpluggable_cpus);
3963 
3964     if (drc) {
3965         /*
3966          * spapr_core_pre_plug() already guarantees that this is a brand new
3967          * core being plugged into a free slot. Nothing should already
3968          * be attached to the corresponding DRC.
3969          */
3970         spapr_drc_attach(drc, dev);
3971 
3972         if (hotplugged) {
3973             /*
3974              * Send hotplug notification interrupt to the guest only
3975              * in case of hotplugged CPUs.
3976              */
3977             spapr_hotplug_req_add_by_index(drc);
3978         } else {
3979             spapr_drc_reset(drc);
3980         }
3981     }
3982 
3983     core_slot->cpu = OBJECT(dev);
3984 
3985     /*
3986      * Set compatibility mode to match the boot CPU, which was either set
3987      * by the machine reset code or by CAS. This really shouldn't fail at
3988      * this point.
3989      */
3990     if (hotplugged) {
3991         for (i = 0; i < cc->nr_threads; i++) {
3992             ppc_set_compat(core->threads[i], POWERPC_CPU(first_cpu)->compat_pvr,
3993                            &error_abort);
3994         }
3995     }
3996 
3997     if (smc->pre_2_10_has_unused_icps) {
3998         for (i = 0; i < cc->nr_threads; i++) {
3999             cs = CPU(core->threads[i]);
4000             pre_2_10_vmstate_unregister_dummy_icp(cs->cpu_index);
4001         }
4002     }
4003 }
4004 
4005 static void spapr_core_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
4006                                 Error **errp)
4007 {
4008     MachineState *machine = MACHINE(OBJECT(hotplug_dev));
4009     MachineClass *mc = MACHINE_GET_CLASS(hotplug_dev);
4010     CPUCore *cc = CPU_CORE(dev);
4011     const char *base_core_type = spapr_get_cpu_core_type(machine->cpu_type);
4012     const char *type = object_get_typename(OBJECT(dev));
4013     CPUArchId *core_slot;
4014     int index;
4015     unsigned int smp_threads = machine->smp.threads;
4016 
4017     if (dev->hotplugged && !mc->has_hotpluggable_cpus) {
4018         error_setg(errp, "CPU hotplug not supported for this machine");
4019         return;
4020     }
4021 
4022     if (strcmp(base_core_type, type)) {
4023         error_setg(errp, "CPU core type should be %s", base_core_type);
4024         return;
4025     }
4026 
4027     if (cc->core_id % smp_threads) {
4028         error_setg(errp, "invalid core id %d", cc->core_id);
4029         return;
4030     }
4031 
4032     /*
4033      * In general we should have homogeneous threads-per-core, but old
4034      * (pre hotplug support) machine types allow the last core to have
4035      * reduced threads as a compatibility hack for configurations where
4036      * the total vcpu count was not a multiple of threads-per-core.
4037      */
4038     if (mc->has_hotpluggable_cpus && (cc->nr_threads != smp_threads)) {
4039         error_setg(errp, "invalid nr-threads %d, must be %d", cc->nr_threads,
4040                    smp_threads);
4041         return;
4042     }
4043 
4044     core_slot = spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index);
4045     if (!core_slot) {
4046         error_setg(errp, "core id %d out of range", cc->core_id);
4047         return;
4048     }
4049 
4050     if (core_slot->cpu) {
4051         error_setg(errp, "core %d already populated", cc->core_id);
4052         return;
4053     }
4054 
4055     numa_cpu_pre_plug(core_slot, dev, errp);
4056 }
4057 
4058 int spapr_phb_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr,
4059                           void *fdt, int *fdt_start_offset, Error **errp)
4060 {
4061     SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(drc->dev);
4062     int intc_phandle;
4063 
4064     intc_phandle = spapr_irq_get_phandle(spapr, spapr->fdt_blob, errp);
4065     if (intc_phandle <= 0) {
4066         return -1;
4067     }
4068 
4069     if (spapr_dt_phb(spapr, sphb, intc_phandle, fdt, fdt_start_offset)) {
4070         error_setg(errp, "unable to create FDT node for PHB %d", sphb->index);
4071         return -1;
4072     }
4073 
4074     /* Generally SLOF creates these; for hotplug it's up to QEMU */
4075     _FDT(fdt_setprop_string(fdt, *fdt_start_offset, "name", "pci"));
4076 
4077     return 0;
4078 }
4079 
4080 static bool spapr_phb_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
4081                                Error **errp)
4082 {
4083     SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
4084     SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(dev);
4085     SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
4086     const unsigned windows_supported = spapr_phb_windows_supported(sphb);
4087     SpaprDrc *drc;
4088 
4089     if (dev->hotplugged && !smc->dr_phb_enabled) {
4090         error_setg(errp, "PHB hotplug not supported for this machine");
4091         return false;
4092     }
4093 
4094     if (sphb->index == (uint32_t)-1) {
4095         error_setg(errp, "\"index\" for PAPR PHB is mandatory");
4096         return false;
4097     }
4098 
4099     drc = spapr_drc_by_id(TYPE_SPAPR_DRC_PHB, sphb->index);
4100     if (drc && drc->dev) {
4101         error_setg(errp, "PHB %d already attached", sphb->index);
4102         return false;
4103     }
4104 
4105     /*
4106      * This will check that sphb->index doesn't exceed the maximum number of
4107      * PHBs for the current machine type.
4108      */
4109     return
4110         smc->phb_placement(spapr, sphb->index,
4111                            &sphb->buid, &sphb->io_win_addr,
4112                            &sphb->mem_win_addr, &sphb->mem64_win_addr,
4113                            windows_supported, sphb->dma_liobn,
4114                            &sphb->nv2_gpa_win_addr, &sphb->nv2_atsd_win_addr,
4115                            errp);
4116 }
4117 
4118 static void spapr_phb_plug(HotplugHandler *hotplug_dev, DeviceState *dev)
4119 {
4120     SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
4121     SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
4122     SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(dev);
4123     SpaprDrc *drc;
4124     bool hotplugged = spapr_drc_hotplugged(dev);
4125 
4126     if (!smc->dr_phb_enabled) {
4127         return;
4128     }
4129 
4130     drc = spapr_drc_by_id(TYPE_SPAPR_DRC_PHB, sphb->index);
4131     /* hotplug hooks should check it's enabled before getting this far */
4132     assert(drc);
4133 
4134     /* spapr_phb_pre_plug() already checked the DRC is attachable */
4135     spapr_drc_attach(drc, dev);
4136 
4137     if (hotplugged) {
4138         spapr_hotplug_req_add_by_index(drc);
4139     } else {
4140         spapr_drc_reset(drc);
4141     }
4142 }
4143 
4144 void spapr_phb_release(DeviceState *dev)
4145 {
4146     HotplugHandler *hotplug_ctrl = qdev_get_hotplug_handler(dev);
4147 
4148     hotplug_handler_unplug(hotplug_ctrl, dev, &error_abort);
4149     object_unparent(OBJECT(dev));
4150 }
4151 
4152 static void spapr_phb_unplug(HotplugHandler *hotplug_dev, DeviceState *dev)
4153 {
4154     qdev_unrealize(dev);
4155 }
4156 
4157 static void spapr_phb_unplug_request(HotplugHandler *hotplug_dev,
4158                                      DeviceState *dev, Error **errp)
4159 {
4160     SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(dev);
4161     SpaprDrc *drc;
4162 
4163     drc = spapr_drc_by_id(TYPE_SPAPR_DRC_PHB, sphb->index);
4164     assert(drc);
4165 
4166     if (!spapr_drc_unplug_requested(drc)) {
4167         spapr_drc_unplug_request(drc);
4168         spapr_hotplug_req_remove_by_index(drc);
4169     } else {
4170         error_setg(errp,
4171                    "PCI Host Bridge unplug already in progress for device %s",
4172                    dev->id);
4173     }
4174 }
4175 
4176 static
4177 bool spapr_tpm_proxy_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
4178                               Error **errp)
4179 {
4180     SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
4181 
4182     if (spapr->tpm_proxy != NULL) {
4183         error_setg(errp, "Only one TPM proxy can be specified for this machine");
4184         return false;
4185     }
4186 
4187     return true;
4188 }
4189 
4190 static void spapr_tpm_proxy_plug(HotplugHandler *hotplug_dev, DeviceState *dev)
4191 {
4192     SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
4193     SpaprTpmProxy *tpm_proxy = SPAPR_TPM_PROXY(dev);
4194 
4195     /* Already checked in spapr_tpm_proxy_pre_plug() */
4196     g_assert(spapr->tpm_proxy == NULL);
4197 
4198     spapr->tpm_proxy = tpm_proxy;
4199 }
4200 
4201 static void spapr_tpm_proxy_unplug(HotplugHandler *hotplug_dev, DeviceState *dev)
4202 {
4203     SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
4204 
4205     qdev_unrealize(dev);
4206     object_unparent(OBJECT(dev));
4207     spapr->tpm_proxy = NULL;
4208 }
4209 
4210 static void spapr_machine_device_plug(HotplugHandler *hotplug_dev,
4211                                       DeviceState *dev, Error **errp)
4212 {
4213     if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
4214         spapr_memory_plug(hotplug_dev, dev);
4215     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
4216         spapr_core_plug(hotplug_dev, dev);
4217     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) {
4218         spapr_phb_plug(hotplug_dev, dev);
4219     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) {
4220         spapr_tpm_proxy_plug(hotplug_dev, dev);
4221     }
4222 }
4223 
4224 static void spapr_machine_device_unplug(HotplugHandler *hotplug_dev,
4225                                         DeviceState *dev, Error **errp)
4226 {
4227     if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
4228         spapr_memory_unplug(hotplug_dev, dev);
4229     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
4230         spapr_core_unplug(hotplug_dev, dev);
4231     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) {
4232         spapr_phb_unplug(hotplug_dev, dev);
4233     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) {
4234         spapr_tpm_proxy_unplug(hotplug_dev, dev);
4235     }
4236 }
4237 
4238 bool spapr_memory_hot_unplug_supported(SpaprMachineState *spapr)
4239 {
4240     return spapr_ovec_test(spapr->ov5_cas, OV5_HP_EVT) ||
4241         /*
4242          * CAS will process all pending unplug requests.
4243          *
4244          * HACK: a guest could theoretically have cleared all bits in OV5,
4245          * but none of the guests we care for do.
4246          */
4247         spapr_ovec_empty(spapr->ov5_cas);
4248 }
4249 
4250 static void spapr_machine_device_unplug_request(HotplugHandler *hotplug_dev,
4251                                                 DeviceState *dev, Error **errp)
4252 {
4253     SpaprMachineState *sms = SPAPR_MACHINE(OBJECT(hotplug_dev));
4254     MachineClass *mc = MACHINE_GET_CLASS(sms);
4255     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
4256 
4257     if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
4258         if (spapr_memory_hot_unplug_supported(sms)) {
4259             spapr_memory_unplug_request(hotplug_dev, dev, errp);
4260         } else {
4261             error_setg(errp, "Memory hot unplug not supported for this guest");
4262         }
4263     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
4264         if (!mc->has_hotpluggable_cpus) {
4265             error_setg(errp, "CPU hot unplug not supported on this machine");
4266             return;
4267         }
4268         spapr_core_unplug_request(hotplug_dev, dev, errp);
4269     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) {
4270         if (!smc->dr_phb_enabled) {
4271             error_setg(errp, "PHB hot unplug not supported on this machine");
4272             return;
4273         }
4274         spapr_phb_unplug_request(hotplug_dev, dev, errp);
4275     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) {
4276         spapr_tpm_proxy_unplug(hotplug_dev, dev);
4277     }
4278 }
4279 
4280 static void spapr_machine_device_pre_plug(HotplugHandler *hotplug_dev,
4281                                           DeviceState *dev, Error **errp)
4282 {
4283     if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
4284         spapr_memory_pre_plug(hotplug_dev, dev, errp);
4285     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
4286         spapr_core_pre_plug(hotplug_dev, dev, errp);
4287     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) {
4288         spapr_phb_pre_plug(hotplug_dev, dev, errp);
4289     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) {
4290         spapr_tpm_proxy_pre_plug(hotplug_dev, dev, errp);
4291     }
4292 }
4293 
4294 static HotplugHandler *spapr_get_hotplug_handler(MachineState *machine,
4295                                                  DeviceState *dev)
4296 {
4297     if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM) ||
4298         object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE) ||
4299         object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE) ||
4300         object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) {
4301         return HOTPLUG_HANDLER(machine);
4302     }
4303     if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
4304         PCIDevice *pcidev = PCI_DEVICE(dev);
4305         PCIBus *root = pci_device_root_bus(pcidev);
4306         SpaprPhbState *phb =
4307             (SpaprPhbState *)object_dynamic_cast(OBJECT(BUS(root)->parent),
4308                                                  TYPE_SPAPR_PCI_HOST_BRIDGE);
4309 
4310         if (phb) {
4311             return HOTPLUG_HANDLER(phb);
4312         }
4313     }
4314     return NULL;
4315 }
4316 
4317 static CpuInstanceProperties
4318 spapr_cpu_index_to_props(MachineState *machine, unsigned cpu_index)
4319 {
4320     CPUArchId *core_slot;
4321     MachineClass *mc = MACHINE_GET_CLASS(machine);
4322 
4323     /* make sure possible_cpus are initialized */
4324     mc->possible_cpu_arch_ids(machine);
4325     /* get CPU core slot containing thread that matches cpu_index */
4326     core_slot = spapr_find_cpu_slot(machine, cpu_index, NULL);
4327     assert(core_slot);
4328     return core_slot->props;
4329 }
4330 
4331 static int64_t spapr_get_default_cpu_node_id(const MachineState *ms, int idx)
4332 {
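    /*
     * Illustrative arithmetic (hypothetical topology): with
     * ms->smp.cores = 4 and 2 NUMA nodes, core index 5 lands on
     * node (5 / 4) % 2 = 1.
     */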
4333     return idx / ms->smp.cores % ms->numa_state->num_nodes;
4334 }
4335 
4336 static const CPUArchIdList *spapr_possible_cpu_arch_ids(MachineState *machine)
4337 {
4338     int i;
4339     unsigned int smp_threads = machine->smp.threads;
4340     unsigned int smp_cpus = machine->smp.cpus;
4341     const char *core_type;
4342     int spapr_max_cores = machine->smp.max_cpus / smp_threads;
4343     MachineClass *mc = MACHINE_GET_CLASS(machine);
4344 
4345     if (!mc->has_hotpluggable_cpus) {
4346         spapr_max_cores = QEMU_ALIGN_UP(smp_cpus, smp_threads) / smp_threads;
4347     }
4348     if (machine->possible_cpus) {
4349         assert(machine->possible_cpus->len == spapr_max_cores);
4350         return machine->possible_cpus;
4351     }
4352 
4353     core_type = spapr_get_cpu_core_type(machine->cpu_type);
4354     if (!core_type) {
4355         error_report("Unable to find sPAPR CPU Core definition");
4356         exit(1);
4357     }
4358 
4359     machine->possible_cpus = g_malloc0(sizeof(CPUArchIdList) +
4360                              sizeof(CPUArchId) * spapr_max_cores);
4361     machine->possible_cpus->len = spapr_max_cores;
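    /*
     * Example (hypothetical -smp 8,cores=2,threads=4): two core slots
     * are created with core_id/arch_id 0 and 4, i.e. core ids advance
     * in steps of threads-per-core.
     */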
4362     for (i = 0; i < machine->possible_cpus->len; i++) {
4363         int core_id = i * smp_threads;
4364 
4365         machine->possible_cpus->cpus[i].type = core_type;
4366         machine->possible_cpus->cpus[i].vcpus_count = smp_threads;
4367         machine->possible_cpus->cpus[i].arch_id = core_id;
4368         machine->possible_cpus->cpus[i].props.has_core_id = true;
4369         machine->possible_cpus->cpus[i].props.core_id = core_id;
4370     }
4371     return machine->possible_cpus;
4372 }
4373 
4374 static bool spapr_phb_placement(SpaprMachineState *spapr, uint32_t index,
4375                                 uint64_t *buid, hwaddr *pio,
4376                                 hwaddr *mmio32, hwaddr *mmio64,
4377                                 unsigned n_dma, uint32_t *liobns,
4378                                 hwaddr *nv2gpa, hwaddr *nv2atsd, Error **errp)
4379 {
4380     /*
4381      * New-style PHB window placement.
4382      *
4383      * Goals: Gives large (1TiB), naturally aligned 64-bit MMIO window
4384      * for each PHB, in addition to 2GiB 32-bit MMIO and 64kiB PIO
4385      * windows.
4386      *
4387      * Some guest kernels can't work with MMIO windows above 1<<46
4388      * (64TiB), so we place up to 31 PHBs in the area 32TiB..64TiB
4389      *
4390      * 32TiB..(32TiB+1984kiB) contains the 64kiB PIO windows for each
4391      * PHB stacked together.  (32TiB+2GiB)..(32TiB+64GiB) contains the
4392      * 2GiB 32-bit MMIO windows for each PHB.  Then 33..64TiB has the
4393      * 1TiB 64-bit MMIO windows for each PHB.
4394      */
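    /*
     * Worked example (illustrative arithmetic only): index 2 yields
     * buid 0x800000020000002, a PIO window at 32TiB + 128kiB, a 32-bit
     * MMIO window at 32TiB + 6GiB and a 64-bit MMIO window at 35TiB.
     */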
4395     const uint64_t base_buid = 0x800000020000000ULL;
4396     int i;
4397 
4398     /* Sanity check natural alignments */
4399     QEMU_BUILD_BUG_ON((SPAPR_PCI_BASE % SPAPR_PCI_MEM64_WIN_SIZE) != 0);
4400     QEMU_BUILD_BUG_ON((SPAPR_PCI_LIMIT % SPAPR_PCI_MEM64_WIN_SIZE) != 0);
4401     QEMU_BUILD_BUG_ON((SPAPR_PCI_MEM64_WIN_SIZE % SPAPR_PCI_MEM32_WIN_SIZE) != 0);
4402     QEMU_BUILD_BUG_ON((SPAPR_PCI_MEM32_WIN_SIZE % SPAPR_PCI_IO_WIN_SIZE) != 0);
4403     /* Sanity check bounds */
4404     QEMU_BUILD_BUG_ON((SPAPR_MAX_PHBS * SPAPR_PCI_IO_WIN_SIZE) >
4405                       SPAPR_PCI_MEM32_WIN_SIZE);
4406     QEMU_BUILD_BUG_ON((SPAPR_MAX_PHBS * SPAPR_PCI_MEM32_WIN_SIZE) >
4407                       SPAPR_PCI_MEM64_WIN_SIZE);
4408 
4409     if (index >= SPAPR_MAX_PHBS) {
4410         error_setg(errp, "\"index\" for PAPR PHB is too large (max %llu)",
4411                    SPAPR_MAX_PHBS - 1);
4412         return false;
4413     }
4414 
4415     *buid = base_buid + index;
4416     for (i = 0; i < n_dma; ++i) {
4417         liobns[i] = SPAPR_PCI_LIOBN(index, i);
4418     }
4419 
4420     *pio = SPAPR_PCI_BASE + index * SPAPR_PCI_IO_WIN_SIZE;
4421     *mmio32 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM32_WIN_SIZE;
4422     *mmio64 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM64_WIN_SIZE;
4423 
4424     *nv2gpa = SPAPR_PCI_NV2RAM64_WIN_BASE + index * SPAPR_PCI_NV2RAM64_WIN_SIZE;
4425     *nv2atsd = SPAPR_PCI_NV2ATSD_WIN_BASE + index * SPAPR_PCI_NV2ATSD_WIN_SIZE;
4426     return true;
4427 }
4428 
4429 static ICSState *spapr_ics_get(XICSFabric *dev, int irq)
4430 {
4431     SpaprMachineState *spapr = SPAPR_MACHINE(dev);
4432 
4433     return ics_valid_irq(spapr->ics, irq) ? spapr->ics : NULL;
4434 }
4435 
4436 static void spapr_ics_resend(XICSFabric *dev)
4437 {
4438     SpaprMachineState *spapr = SPAPR_MACHINE(dev);
4439 
4440     ics_resend(spapr->ics);
4441 }
4442 
4443 static ICPState *spapr_icp_get(XICSFabric *xi, int vcpu_id)
4444 {
4445     PowerPCCPU *cpu = spapr_find_cpu(vcpu_id);
4446 
4447     return cpu ? spapr_cpu_state(cpu)->icp : NULL;
4448 }
4449 
4450 static void spapr_pic_print_info(InterruptStatsProvider *obj,
4451                                  Monitor *mon)
4452 {
4453     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
4454 
4455     spapr_irq_print_info(spapr, mon);
4456     monitor_printf(mon, "irqchip: %s\n",
4457                    kvm_irqchip_in_kernel() ? "in-kernel" : "emulated");
4458 }
4459 
4460 /*
4461  * This is a XIVE-only operation
4462  */
4463 static int spapr_match_nvt(XiveFabric *xfb, uint8_t format,
4464                            uint8_t nvt_blk, uint32_t nvt_idx,
4465                            bool cam_ignore, uint8_t priority,
4466                            uint32_t logic_serv, XiveTCTXMatch *match)
4467 {
4468     SpaprMachineState *spapr = SPAPR_MACHINE(xfb);
4469     XivePresenter *xptr = XIVE_PRESENTER(spapr->active_intc);
4470     XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr);
4471     int count;
4472 
4473     count = xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, cam_ignore,
4474                            priority, logic_serv, match);
4475     if (count < 0) {
4476         return count;
4477     }
4478 
4479     /*
4480      * When we implement the save and restore of the thread interrupt
4481      * contexts in the enter/exit CPU handlers of the machine and the
4482      * escalations in QEMU, we should be able to handle non-dispatched
4483      * vCPUs.
4484      *
4485      * Until this is done, the sPAPR machine should always find at
4486      * least one matching context.
4487      */
4488     if (count == 0) {
4489         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVT %x/%x is not dispatched\n",
4490                       nvt_blk, nvt_idx);
4491     }
4492 
4493     return count;
4494 }
4495 
4496 int spapr_get_vcpu_id(PowerPCCPU *cpu)
4497 {
4498     return cpu->vcpu_id;
4499 }
4500 
4501 bool spapr_set_vcpu_id(PowerPCCPU *cpu, int cpu_index, Error **errp)
4502 {
4503     SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
4504     MachineState *ms = MACHINE(spapr);
4505     int vcpu_id;
4506 
4507     vcpu_id = spapr_vcpu_id(spapr, cpu_index);
4508 
4509     if (kvm_enabled() && !kvm_vcpu_id_is_valid(vcpu_id)) {
4510         error_setg(errp, "Can't create CPU with id %d in KVM", vcpu_id);
4511         error_append_hint(errp, "Adjust the number of cpus to %d "
4512                           "or try to raise the number of threads per core\n",
4513                           vcpu_id * ms->smp.threads / spapr->vsmt);
4514         return false;
4515     }
4516 
4517     cpu->vcpu_id = vcpu_id;
4518     return true;
4519 }
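
/*
 * A worked example of the check above, assuming the vcpu id packing
 * used by spapr_vcpu_id() elsewhere in this file, i.e.
 * (cpu_index / smp_threads) * vsmt + cpu_index % smp_threads:
 * with smp_threads=4 and vsmt=8, core 1's first thread has
 * cpu_index 4 but vcpu_id 8, so the vcpu id space is stretched and
 * can exceed what KVM supports even when cpu_index itself is fine.
 */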
4520 
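/* Linear scan over all CPUs; returns NULL if no vCPU has this id. */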
4521 PowerPCCPU *spapr_find_cpu(int vcpu_id)
4522 {
4523     CPUState *cs;
4524 
4525     CPU_FOREACH(cs) {
4526         PowerPCCPU *cpu = POWERPC_CPU(cs);
4527 
4528         if (spapr_get_vcpu_id(cpu) == vcpu_id) {
4529             return cpu;
4530         }
4531     }
4532 
4533     return NULL;
4534 }
4535 
4536 static bool spapr_cpu_in_nested(PowerPCCPU *cpu)
4537 {
4538     SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
4539 
4540     return spapr_cpu->in_nested;
4541 }
4542 
4543 static void spapr_cpu_exec_enter(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu)
4544 {
4545     SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
4546 
4547     /* These are only called by TCG; KVM maintains dispatch state itself */
4548 
4549     spapr_cpu->prod = false;
4550     if (spapr_cpu->vpa_addr) {
4551         CPUState *cs = CPU(cpu);
4552         uint32_t dispatch;
4553 
4554         dispatch = ldl_be_phys(cs->as,
4555                                spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER);
4556         dispatch++;
4557         if ((dispatch & 1) != 0) {
4558             qemu_log_mask(LOG_GUEST_ERROR,
4559                           "VPA: incorrect dispatch counter value for "
4560                           "dispatched partition %u, correcting.\n", dispatch);
4561             dispatch++;
4562         }
4563         stl_be_phys(cs->as,
4564                     spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER, dispatch);
4565     }
4566 }
4567 
4568 static void spapr_cpu_exec_exit(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu)
4569 {
4570     SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
4571 
4572     if (spapr_cpu->vpa_addr) {
4573         CPUState *cs = CPU(cpu);
4574         uint32_t dispatch;
4575 
4576         dispatch = ldl_be_phys(cs->as,
4577                                spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER);
4578         dispatch++;
4579         if ((dispatch & 1) != 1) {
4580             qemu_log_mask(LOG_GUEST_ERROR,
4581                           "VPA: incorrect dispatch counter value for "
4582                           "preempted partition %u, correcting.\n", dispatch);
4583             dispatch++;
4584         }
4585         stl_be_phys(cs->as,
4586                     spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER, dispatch);
4587     }
4588 }
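
/*
 * Together these keep the parity of the VPA dispatch counter
 * meaningful: exec_enter leaves it even (dispatched) and exec_exit
 * leaves it odd (preempted), which is what Linux's
 * vcpu_is_preempted() checks. A guest-side sketch (structure and
 * field names are illustrative):
 *
 *   uint32_t yc = be32_to_cpu(lppaca->yield_count);
 *   bool preempted = yc & 1;
 */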
4589 
4590 static void spapr_machine_class_init(ObjectClass *oc, void *data)
4591 {
4592     MachineClass *mc = MACHINE_CLASS(oc);
4593     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(oc);
4594     FWPathProviderClass *fwc = FW_PATH_PROVIDER_CLASS(oc);
4595     NMIClass *nc = NMI_CLASS(oc);
4596     HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc);
4597     PPCVirtualHypervisorClass *vhc = PPC_VIRTUAL_HYPERVISOR_CLASS(oc);
4598     XICSFabricClass *xic = XICS_FABRIC_CLASS(oc);
4599     InterruptStatsProviderClass *ispc = INTERRUPT_STATS_PROVIDER_CLASS(oc);
4600     XiveFabricClass *xfc = XIVE_FABRIC_CLASS(oc);
4601     VofMachineIfClass *vmc = VOF_MACHINE_CLASS(oc);
4602 
4603     mc->desc = "pSeries Logical Partition (PAPR compliant)";
4604     mc->ignore_boot_device_suffixes = true;
4605 
4606     /*
4607      * We set up the default / latest behaviour here.  The class_init
4608      * functions for the specific versioned machine types can override
4609      * these details for backwards compatibility.
4610      */
4611     mc->init = spapr_machine_init;
4612     mc->reset = spapr_machine_reset;
4613     mc->block_default_type = IF_SCSI;
4614 
4615     /*
4616      * Set max_cpus to INT32_MAX. Both the KVM and TCG limits should
4617      * be constrained by the host's capabilities rather than hardcoded:
4618      * max_cpus for KVM guests is checked in kvm_init(), and TCG
4619      * guests may have as many CPUs as the host is capable of
4620      * emulating.
4621      */
4622     mc->max_cpus = INT32_MAX;
4623 
4624     mc->no_parallel = 1;
4625     mc->default_boot_order = "";
4626     mc->default_ram_size = 512 * MiB;
4627     mc->default_ram_id = "ppc_spapr.ram";
4628     mc->default_display = "std";
4629     mc->kvm_type = spapr_kvm_type;
4630     machine_class_allow_dynamic_sysbus_dev(mc, TYPE_SPAPR_PCI_HOST_BRIDGE);
4631     mc->pci_allow_0_address = true;
4632     assert(!mc->get_hotplug_handler);
4633     mc->get_hotplug_handler = spapr_get_hotplug_handler;
4634     hc->pre_plug = spapr_machine_device_pre_plug;
4635     hc->plug = spapr_machine_device_plug;
4636     mc->cpu_index_to_instance_props = spapr_cpu_index_to_props;
4637     mc->get_default_cpu_node_id = spapr_get_default_cpu_node_id;
4638     mc->possible_cpu_arch_ids = spapr_possible_cpu_arch_ids;
4639     hc->unplug_request = spapr_machine_device_unplug_request;
4640     hc->unplug = spapr_machine_device_unplug;
4641 
4642     smc->dr_lmb_enabled = true;
4643     smc->update_dt_enabled = true;
4644     mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power9_v2.2");
4645     mc->has_hotpluggable_cpus = true;
4646     mc->nvdimm_supported = true;
4647     smc->resize_hpt_default = SPAPR_RESIZE_HPT_ENABLED;
4648     fwc->get_dev_path = spapr_get_fw_dev_path;
4649     nc->nmi_monitor_handler = spapr_nmi;
4650     smc->phb_placement = spapr_phb_placement;
4651     vhc->cpu_in_nested = spapr_cpu_in_nested;
4652     vhc->deliver_hv_excp = spapr_exit_nested;
4653     vhc->hypercall = emulate_spapr_hypercall;
4654     vhc->hpt_mask = spapr_hpt_mask;
4655     vhc->map_hptes = spapr_map_hptes;
4656     vhc->unmap_hptes = spapr_unmap_hptes;
4657     vhc->hpte_set_c = spapr_hpte_set_c;
4658     vhc->hpte_set_r = spapr_hpte_set_r;
4659     vhc->get_pate = spapr_get_pate;
4660     vhc->encode_hpt_for_kvm_pr = spapr_encode_hpt_for_kvm_pr;
4661     vhc->cpu_exec_enter = spapr_cpu_exec_enter;
4662     vhc->cpu_exec_exit = spapr_cpu_exec_exit;
4663     xic->ics_get = spapr_ics_get;
4664     xic->ics_resend = spapr_ics_resend;
4665     xic->icp_get = spapr_icp_get;
4666     ispc->print_info = spapr_pic_print_info;
4667     /* Force NUMA node memory size to be a multiple of
4668      * SPAPR_MEMORY_BLOCK_SIZE (256M) since that's the granularity
4669      * at which LMBs are represented and hot-added
4670      */
4671     mc->numa_mem_align_shift = 28;
4672     mc->auto_enable_numa = true;
4673 
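    /*
     * Default sPAPR capabilities for the latest machine type; the
     * versioned class_options functions below override these where
     * older machines behaved differently.
     */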
4674     smc->default_caps.caps[SPAPR_CAP_HTM] = SPAPR_CAP_OFF;
4675     smc->default_caps.caps[SPAPR_CAP_VSX] = SPAPR_CAP_ON;
4676     smc->default_caps.caps[SPAPR_CAP_DFP] = SPAPR_CAP_ON;
4677     smc->default_caps.caps[SPAPR_CAP_CFPC] = SPAPR_CAP_WORKAROUND;
4678     smc->default_caps.caps[SPAPR_CAP_SBBC] = SPAPR_CAP_WORKAROUND;
4679     smc->default_caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_WORKAROUND;
4680     smc->default_caps.caps[SPAPR_CAP_HPT_MAXPAGESIZE] = 16; /* 64kiB */
4681     smc->default_caps.caps[SPAPR_CAP_NESTED_KVM_HV] = SPAPR_CAP_OFF;
4682     smc->default_caps.caps[SPAPR_CAP_LARGE_DECREMENTER] = SPAPR_CAP_ON;
4683     smc->default_caps.caps[SPAPR_CAP_CCF_ASSIST] = SPAPR_CAP_ON;
4684     smc->default_caps.caps[SPAPR_CAP_FWNMI] = SPAPR_CAP_ON;
4685     smc->default_caps.caps[SPAPR_CAP_RPT_INVALIDATE] = SPAPR_CAP_OFF;
4686 
4687     /*
4688      * This cap specifies whether AIL mode 3, set via the H_SET_MODE
4689      * hypercall, is supported. The default is modified
4690      * by default_caps_with_cpu().
4691      */
4692     smc->default_caps.caps[SPAPR_CAP_AIL_MODE_3] = SPAPR_CAP_ON;
4693     spapr_caps_add_properties(smc);
4694     smc->irq = &spapr_irq_dual;
4695     smc->dr_phb_enabled = true;
4696     smc->linux_pci_probe = true;
4697     smc->smp_threads_vsmt = true;
4698     smc->nr_xirqs = SPAPR_NR_XIRQS;
4699     xfc->match_nvt = spapr_match_nvt;
4700     vmc->client_architecture_support = spapr_vof_client_architecture_support;
4701     vmc->quiesce = spapr_vof_quiesce;
4702     vmc->setprop = spapr_vof_setprop;
4703 }
4704 
4705 static const TypeInfo spapr_machine_info = {
4706     .name          = TYPE_SPAPR_MACHINE,
4707     .parent        = TYPE_MACHINE,
4708     .abstract      = true,
4709     .instance_size = sizeof(SpaprMachineState),
4710     .instance_init = spapr_instance_init,
4711     .instance_finalize = spapr_machine_finalizefn,
4712     .class_size    = sizeof(SpaprMachineClass),
4713     .class_init    = spapr_machine_class_init,
4714     .interfaces = (InterfaceInfo[]) {
4715         { TYPE_FW_PATH_PROVIDER },
4716         { TYPE_NMI },
4717         { TYPE_HOTPLUG_HANDLER },
4718         { TYPE_PPC_VIRTUAL_HYPERVISOR },
4719         { TYPE_XICS_FABRIC },
4720         { TYPE_INTERRUPT_STATS_PROVIDER },
4721         { TYPE_XIVE_FABRIC },
4722         { TYPE_VOF_MACHINE_IF },
4723         { }
4724     },
4725 };
4726 
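/*
 * Applied only to the single newest versioned machine type (the one
 * registered with latest = true below), which therefore also answers
 * to the "pseries" alias and is QEMU's default machine.
 */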
4727 static void spapr_machine_latest_class_options(MachineClass *mc)
4728 {
4729     mc->alias = "pseries";
4730     mc->is_default = true;
4731 }
4732 
4733 #define DEFINE_SPAPR_MACHINE(suffix, verstr, latest)                 \
4734     static void spapr_machine_##suffix##_class_init(ObjectClass *oc, \
4735                                                     void *data)      \
4736     {                                                                \
4737         MachineClass *mc = MACHINE_CLASS(oc);                        \
4738         spapr_machine_##suffix##_class_options(mc);                  \
4739         if (latest) {                                                \
4740             spapr_machine_latest_class_options(mc);                  \
4741         }                                                            \
4742     }                                                                \
4743     static const TypeInfo spapr_machine_##suffix##_info = {          \
4744         .name = MACHINE_TYPE_NAME("pseries-" verstr),                \
4745         .parent = TYPE_SPAPR_MACHINE,                                \
4746         .class_init = spapr_machine_##suffix##_class_init,           \
4747     };                                                               \
4748     static void spapr_machine_register_##suffix(void)                \
4749     {                                                                \
4750         type_register(&spapr_machine_##suffix##_info);               \
4751     }                                                                \
4752     type_init(spapr_machine_register_##suffix)
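
/*
 * For example, DEFINE_SPAPR_MACHINE(8_1, "8.1", true) expands to a
 * class_init that chains spapr_machine_8_1_class_options() and, since
 * latest is true, spapr_machine_latest_class_options(), and registers
 * the resulting "pseries-8.1" machine type with QOM.
 */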
4753 
4754 /*
4755  * pseries-8.1
4756  */
4757 static void spapr_machine_8_1_class_options(MachineClass *mc)
4758 {
4759     /* Defaults for the latest behaviour inherited from the base class */
4760 }
4761 
4762 DEFINE_SPAPR_MACHINE(8_1, "8.1", true);
4763 
4764 /*
4765  * pseries-8.0
4766  */
4767 static void spapr_machine_8_0_class_options(MachineClass *mc)
4768 {
4769     spapr_machine_8_1_class_options(mc);
4770     compat_props_add(mc->compat_props, hw_compat_8_0, hw_compat_8_0_len);
4771 }
4772 
4773 DEFINE_SPAPR_MACHINE(8_0, "8.0", false);
4774 
4775 /*
4776  * pseries-7.2
4777  */
4778 static void spapr_machine_7_2_class_options(MachineClass *mc)
4779 {
4780     spapr_machine_8_0_class_options(mc);
4781     compat_props_add(mc->compat_props, hw_compat_7_2, hw_compat_7_2_len);
4782 }
4783 
4784 DEFINE_SPAPR_MACHINE(7_2, "7.2", false);
4785 
4786 /*
4787  * pseries-7.1
4788  */
4789 static void spapr_machine_7_1_class_options(MachineClass *mc)
4790 {
4791     spapr_machine_7_2_class_options(mc);
4792     compat_props_add(mc->compat_props, hw_compat_7_1, hw_compat_7_1_len);
4793 }
4794 
4795 DEFINE_SPAPR_MACHINE(7_1, "7.1", false);
4796 
4797 /*
4798  * pseries-7.0
4799  */
4800 static void spapr_machine_7_0_class_options(MachineClass *mc)
4801 {
4802     spapr_machine_7_1_class_options(mc);
4803     compat_props_add(mc->compat_props, hw_compat_7_0, hw_compat_7_0_len);
4804 }
4805 
4806 DEFINE_SPAPR_MACHINE(7_0, "7.0", false);
4807 
4808 /*
4809  * pseries-6.2
4810  */
4811 static void spapr_machine_6_2_class_options(MachineClass *mc)
4812 {
4813     spapr_machine_7_0_class_options(mc);
4814     compat_props_add(mc->compat_props, hw_compat_6_2, hw_compat_6_2_len);
4815 }
4816 
4817 DEFINE_SPAPR_MACHINE(6_2, "6.2", false);
4818 
4819 /*
4820  * pseries-6.1
4821  */
4822 static void spapr_machine_6_1_class_options(MachineClass *mc)
4823 {
4824     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
4825 
4826     spapr_machine_6_2_class_options(mc);
4827     compat_props_add(mc->compat_props, hw_compat_6_1, hw_compat_6_1_len);
4828     smc->pre_6_2_numa_affinity = true;
4829     mc->smp_props.prefer_sockets = true;
4830 }
4831 
4832 DEFINE_SPAPR_MACHINE(6_1, "6.1", false);
4833 
4834 /*
4835  * pseries-6.0
4836  */
4837 static void spapr_machine_6_0_class_options(MachineClass *mc)
4838 {
4839     spapr_machine_6_1_class_options(mc);
4840     compat_props_add(mc->compat_props, hw_compat_6_0, hw_compat_6_0_len);
4841 }
4842 
4843 DEFINE_SPAPR_MACHINE(6_0, "6.0", false);
4844 
4845 /*
4846  * pseries-5.2
4847  */
4848 static void spapr_machine_5_2_class_options(MachineClass *mc)
4849 {
4850     spapr_machine_6_0_class_options(mc);
4851     compat_props_add(mc->compat_props, hw_compat_5_2, hw_compat_5_2_len);
4852 }
4853 
4854 DEFINE_SPAPR_MACHINE(5_2, "5.2", false);
4855 
4856 /*
4857  * pseries-5.1
4858  */
4859 static void spapr_machine_5_1_class_options(MachineClass *mc)
4860 {
4861     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
4862 
4863     spapr_machine_5_2_class_options(mc);
4864     compat_props_add(mc->compat_props, hw_compat_5_1, hw_compat_5_1_len);
4865     smc->pre_5_2_numa_associativity = true;
4866 }
4867 
4868 DEFINE_SPAPR_MACHINE(5_1, "5.1", false);
4869 
4870 /*
4871  * pseries-5.0
4872  */
4873 static void spapr_machine_5_0_class_options(MachineClass *mc)
4874 {
4875     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
4876     static GlobalProperty compat[] = {
4877         { TYPE_SPAPR_PCI_HOST_BRIDGE, "pre-5.1-associativity", "on" },
4878     };
4879 
4880     spapr_machine_5_1_class_options(mc);
4881     compat_props_add(mc->compat_props, hw_compat_5_0, hw_compat_5_0_len);
4882     compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
4883     mc->numa_mem_supported = true;
4884     smc->pre_5_1_assoc_refpoints = true;
4885 }
4886 
4887 DEFINE_SPAPR_MACHINE(5_0, "5.0", false);
4888 
4889 /*
4890  * pseries-4.2
4891  */
4892 static void spapr_machine_4_2_class_options(MachineClass *mc)
4893 {
4894     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
4895 
4896     spapr_machine_5_0_class_options(mc);
4897     compat_props_add(mc->compat_props, hw_compat_4_2, hw_compat_4_2_len);
4898     smc->default_caps.caps[SPAPR_CAP_CCF_ASSIST] = SPAPR_CAP_OFF;
4899     smc->default_caps.caps[SPAPR_CAP_FWNMI] = SPAPR_CAP_OFF;
4900     smc->rma_limit = 16 * GiB;
4901     mc->nvdimm_supported = false;
4902 }
4903 
4904 DEFINE_SPAPR_MACHINE(4_2, "4.2", false);
4905 
4906 /*
4907  * pseries-4.1
4908  */
4909 static void spapr_machine_4_1_class_options(MachineClass *mc)
4910 {
4911     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
4912     static GlobalProperty compat[] = {
4913         /* Only allow 4kiB and 64kiB IOMMU pagesizes */
4914         { TYPE_SPAPR_PCI_HOST_BRIDGE, "pgsz", "0x11000" },
4915     };
4916 
4917     spapr_machine_4_2_class_options(mc);
4918     smc->linux_pci_probe = false;
4919     smc->smp_threads_vsmt = false;
4920     compat_props_add(mc->compat_props, hw_compat_4_1, hw_compat_4_1_len);
4921     compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
4922 }
4923 
4924 DEFINE_SPAPR_MACHINE(4_1, "4.1", false);
4925 
4926 /*
4927  * pseries-4.0
4928  */
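/*
 * NVLink2 GPU/ATSD window placement was added after the 4.0 machine
 * type, so the legacy placement hook reports both windows as absent.
 */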
4929 static bool phb_placement_4_0(SpaprMachineState *spapr, uint32_t index,
4930                               uint64_t *buid, hwaddr *pio,
4931                               hwaddr *mmio32, hwaddr *mmio64,
4932                               unsigned n_dma, uint32_t *liobns,
4933                               hwaddr *nv2gpa, hwaddr *nv2atsd, Error **errp)
4934 {
4935     if (!spapr_phb_placement(spapr, index, buid, pio, mmio32, mmio64, n_dma,
4936                              liobns, nv2gpa, nv2atsd, errp)) {
4937         return false;
4938     }
4939 
4940     *nv2gpa = 0;
4941     *nv2atsd = 0;
4942     return true;
4943 }

4944 static void spapr_machine_4_0_class_options(MachineClass *mc)
4945 {
4946     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
4947 
4948     spapr_machine_4_1_class_options(mc);
4949     compat_props_add(mc->compat_props, hw_compat_4_0, hw_compat_4_0_len);
4950     smc->phb_placement = phb_placement_4_0;
4951     smc->irq = &spapr_irq_xics;
4952     smc->pre_4_1_migration = true;
4953 }
4954 
4955 DEFINE_SPAPR_MACHINE(4_0, "4.0", false);
4956 
4957 /*
4958  * pseries-3.1
4959  */
4960 static void spapr_machine_3_1_class_options(MachineClass *mc)
4961 {
4962     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
4963 
4964     spapr_machine_4_0_class_options(mc);
4965     compat_props_add(mc->compat_props, hw_compat_3_1, hw_compat_3_1_len);
4966 
4967     mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power8_v2.0");
4968     smc->update_dt_enabled = false;
4969     smc->dr_phb_enabled = false;
4970     smc->broken_host_serial_model = true;
4971     smc->default_caps.caps[SPAPR_CAP_CFPC] = SPAPR_CAP_BROKEN;
4972     smc->default_caps.caps[SPAPR_CAP_SBBC] = SPAPR_CAP_BROKEN;
4973     smc->default_caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_BROKEN;
4974     smc->default_caps.caps[SPAPR_CAP_LARGE_DECREMENTER] = SPAPR_CAP_OFF;
4975 }
4976 
4977 DEFINE_SPAPR_MACHINE(3_1, "3.1", false);
4978 
4979 /*
4980  * pseries-3.0
4981  */
4982 
4983 static void spapr_machine_3_0_class_options(MachineClass *mc)
4984 {
4985     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
4986 
4987     spapr_machine_3_1_class_options(mc);
4988     compat_props_add(mc->compat_props, hw_compat_3_0, hw_compat_3_0_len);
4989 
4990     smc->legacy_irq_allocation = true;
4991     smc->nr_xirqs = 0x400;
4992     smc->irq = &spapr_irq_xics_legacy;
4993 }
4994 
4995 DEFINE_SPAPR_MACHINE(3_0, "3.0", false);
4996 
4997 /*
4998  * pseries-2.12
4999  */
5000 static void spapr_machine_2_12_class_options(MachineClass *mc)
5001 {
5002     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
5003     static GlobalProperty compat[] = {
5004         { TYPE_POWERPC_CPU, "pre-3.0-migration", "on" },
5005         { TYPE_SPAPR_CPU_CORE, "pre-3.0-migration", "on" },
5006     };
5007 
5008     spapr_machine_3_0_class_options(mc);
5009     compat_props_add(mc->compat_props, hw_compat_2_12, hw_compat_2_12_len);
5010     compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
5011 
5012     /* We depend on kvm_enabled() to choose a default value for the
5013      * hpt-max-page-size capability. Of course we can't do it here
5014      * because this is too early and the HW accelerator isn't initialized
5015      * yet. Postpone this to machine init (see default_caps_with_cpu()).
5016      */
5017     smc->default_caps.caps[SPAPR_CAP_HPT_MAXPAGESIZE] = 0;
5018 }
5019 
5020 DEFINE_SPAPR_MACHINE(2_12, "2.12", false);
5021 
5022 static void spapr_machine_2_12_sxxm_class_options(MachineClass *mc)
5023 {
5024     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
5025 
5026     spapr_machine_2_12_class_options(mc);
5027     smc->default_caps.caps[SPAPR_CAP_CFPC] = SPAPR_CAP_WORKAROUND;
5028     smc->default_caps.caps[SPAPR_CAP_SBBC] = SPAPR_CAP_WORKAROUND;
5029     smc->default_caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_FIXED_CCD;
5030 }
5031 
5032 DEFINE_SPAPR_MACHINE(2_12_sxxm, "2.12-sxxm", false);
5033 
5034 /*
5035  * pseries-2.11
5036  */
5037 
5038 static void spapr_machine_2_11_class_options(MachineClass *mc)
5039 {
5040     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
5041 
5042     spapr_machine_2_12_class_options(mc);
5043     smc->default_caps.caps[SPAPR_CAP_HTM] = SPAPR_CAP_ON;
5044     compat_props_add(mc->compat_props, hw_compat_2_11, hw_compat_2_11_len);
5045 }
5046 
5047 DEFINE_SPAPR_MACHINE(2_11, "2.11", false);
5048 
5049 /*
5050  * pseries-2.10
5051  */
5052 
5053 static void spapr_machine_2_10_class_options(MachineClass *mc)
5054 {
5055     spapr_machine_2_11_class_options(mc);
5056     compat_props_add(mc->compat_props, hw_compat_2_10, hw_compat_2_10_len);
5057 }
5058 
5059 DEFINE_SPAPR_MACHINE(2_10, "2.10", false);
5060 
5061 /*
5062  * pseries-2.9
5063  */
5064 
5065 static void spapr_machine_2_9_class_options(MachineClass *mc)
5066 {
5067     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
5068     static GlobalProperty compat[] = {
5069         { TYPE_POWERPC_CPU, "pre-2.10-migration", "on" },
5070     };
5071 
5072     spapr_machine_2_10_class_options(mc);
5073     compat_props_add(mc->compat_props, hw_compat_2_9, hw_compat_2_9_len);
5074     compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
5075     smc->pre_2_10_has_unused_icps = true;
5076     smc->resize_hpt_default = SPAPR_RESIZE_HPT_DISABLED;
5077 }
5078 
5079 DEFINE_SPAPR_MACHINE(2_9, "2.9", false);
5080 
5081 /*
5082  * pseries-2.8
5083  */
5084 
5085 static void spapr_machine_2_8_class_options(MachineClass *mc)
5086 {
5087     static GlobalProperty compat[] = {
5088         { TYPE_SPAPR_PCI_HOST_BRIDGE, "pcie-extended-configuration-space", "off" },
5089     };
5090 
5091     spapr_machine_2_9_class_options(mc);
5092     compat_props_add(mc->compat_props, hw_compat_2_8, hw_compat_2_8_len);
5093     compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
5094     mc->numa_mem_align_shift = 23;
5095 }
5096 
5097 DEFINE_SPAPR_MACHINE(2_8, "2.8", false);
5098 
5099 /*
5100  * pseries-2.7
5101  */
5102 
5103 static bool phb_placement_2_7(SpaprMachineState *spapr, uint32_t index,
5104                               uint64_t *buid, hwaddr *pio,
5105                               hwaddr *mmio32, hwaddr *mmio64,
5106                               unsigned n_dma, uint32_t *liobns,
5107                               hwaddr *nv2gpa, hwaddr *nv2atsd, Error **errp)
5108 {
5109     /* Legacy PHB placement for pseries-2.7 and earlier machine types */
5110     const uint64_t base_buid = 0x800000020000000ULL;
5111     const hwaddr phb_spacing = 0x1000000000ULL; /* 64 GiB */
5112     const hwaddr mmio_offset = 0xa0000000; /* 2 GiB + 512 MiB */
5113     const hwaddr pio_offset = 0x80000000; /* 2 GiB */
5114     const uint32_t max_index = 255;
5115     const hwaddr phb0_alignment = 0x10000000000ULL; /* 1 TiB */
5116 
5117     uint64_t ram_top = MACHINE(spapr)->ram_size;
5118     hwaddr phb0_base, phb_base;
5119     int i;
5120 
5121     /* Do we have device memory? */
5122     if (MACHINE(spapr)->maxram_size > ram_top) {
5123         /* Can't just use maxram_size, because there may be an
5124          * alignment gap between normal and device memory regions
5125          */
5126         ram_top = MACHINE(spapr)->device_memory->base +
5127             memory_region_size(&MACHINE(spapr)->device_memory->mr);
5128     }
5129 
5130     phb0_base = QEMU_ALIGN_UP(ram_top, phb0_alignment);
5131 
5132     if (index > max_index) {
5133         error_setg(errp, "\"index\" for PAPR PHB is too large (max %u)",
5134                    max_index);
5135         return false;
5136     }
5137 
5138     *buid = base_buid + index;
5139     for (i = 0; i < n_dma; ++i) {
5140         liobns[i] = SPAPR_PCI_LIOBN(index, i);
5141     }
5142 
5143     phb_base = phb0_base + index * phb_spacing;
5144     *pio = phb_base + pio_offset;
5145     *mmio32 = phb_base + mmio_offset;
5146     /*
5147      * We don't set the 64-bit MMIO window, relying on the PHB's
5148      * fallback behaviour of automatically splitting a large "32-bit"
5149      * window into contiguous 32-bit and 64-bit windows
5150      */
5151 
5152     *nv2gpa = 0;
5153     *nv2atsd = 0;
5154     return true;
5155 }
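
/*
 * A worked example with the constants above: if ram_top rounds up to
 * 1 TiB, PHB 0 gets its PIO window at 1 TiB + 2 GiB and its 32-bit
 * MMIO window at 1 TiB + 2 GiB + 512 MiB, and each further index
 * shifts the whole PHB region up by the 64 GiB phb_spacing.
 */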
5156 
5157 static void spapr_machine_2_7_class_options(MachineClass *mc)
5158 {
5159     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
5160     static GlobalProperty compat[] = {
5161         { TYPE_SPAPR_PCI_HOST_BRIDGE, "mem_win_size", "0xf80000000", },
5162         { TYPE_SPAPR_PCI_HOST_BRIDGE, "mem64_win_size", "0", },
5163         { TYPE_POWERPC_CPU, "pre-2.8-migration", "on", },
5164         { TYPE_SPAPR_PCI_HOST_BRIDGE, "pre-2.8-migration", "on", },
5165     };
5166 
5167     spapr_machine_2_8_class_options(mc);
5168     mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power7_v2.3");
5169     mc->default_machine_opts = "modern-hotplug-events=off";
5170     compat_props_add(mc->compat_props, hw_compat_2_7, hw_compat_2_7_len);
5171     compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
5172     smc->phb_placement = phb_placement_2_7;
5173 }
5174 
5175 DEFINE_SPAPR_MACHINE(2_7, "2.7", false);
5176 
5177 /*
5178  * pseries-2.6
5179  */
5180 
5181 static void spapr_machine_2_6_class_options(MachineClass *mc)
5182 {
5183     static GlobalProperty compat[] = {
5184         { TYPE_SPAPR_PCI_HOST_BRIDGE, "ddw", "off" },
5185     };
5186 
5187     spapr_machine_2_7_class_options(mc);
5188     mc->has_hotpluggable_cpus = false;
5189     compat_props_add(mc->compat_props, hw_compat_2_6, hw_compat_2_6_len);
5190     compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
5191 }
5192 
5193 DEFINE_SPAPR_MACHINE(2_6, "2.6", false);
5194 
5195 /*
5196  * pseries-2.5
5197  */
5198 
5199 static void spapr_machine_2_5_class_options(MachineClass *mc)
5200 {
5201     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
5202     static GlobalProperty compat[] = {
5203         { "spapr-vlan", "use-rx-buffer-pools", "off" },
5204     };
5205 
5206     spapr_machine_2_6_class_options(mc);
5207     smc->use_ohci_by_default = true;
5208     compat_props_add(mc->compat_props, hw_compat_2_5, hw_compat_2_5_len);
5209     compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
5210 }
5211 
5212 DEFINE_SPAPR_MACHINE(2_5, "2.5", false);
5213 
5214 /*
5215  * pseries-2.4
5216  */
5217 
5218 static void spapr_machine_2_4_class_options(MachineClass *mc)
5219 {
5220     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
5221 
5222     spapr_machine_2_5_class_options(mc);
5223     smc->dr_lmb_enabled = false;
5224     compat_props_add(mc->compat_props, hw_compat_2_4, hw_compat_2_4_len);
5225 }
5226 
5227 DEFINE_SPAPR_MACHINE(2_4, "2.4", false);
5228 
5229 /*
5230  * pseries-2.3
5231  */
5232 
5233 static void spapr_machine_2_3_class_options(MachineClass *mc)
5234 {
5235     static GlobalProperty compat[] = {
5236         { "spapr-pci-host-bridge", "dynamic-reconfiguration", "off" },
5237     };

5238     spapr_machine_2_4_class_options(mc);
5239     compat_props_add(mc->compat_props, hw_compat_2_3, hw_compat_2_3_len);
5240     compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
5241 }
5242 DEFINE_SPAPR_MACHINE(2_3, "2.3", false);
5243 
5244 /*
5245  * pseries-2.2
5246  */
5247 
5248 static void spapr_machine_2_2_class_options(MachineClass *mc)
5249 {
5250     static GlobalProperty compat[] = {
5251         { TYPE_SPAPR_PCI_HOST_BRIDGE, "mem_win_size", "0x20000000" },
5252     };
5253 
5254     spapr_machine_2_3_class_options(mc);
5255     compat_props_add(mc->compat_props, hw_compat_2_2, hw_compat_2_2_len);
5256     compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
5257     mc->default_machine_opts = "modern-hotplug-events=off,suppress-vmdesc=on";
5258 }
5259 DEFINE_SPAPR_MACHINE(2_2, "2.2", false);
5260 
5261 /*
5262  * pseries-2.1
5263  */
5264 
5265 static void spapr_machine_2_1_class_options(MachineClass *mc)
5266 {
5267     spapr_machine_2_2_class_options(mc);
5268     compat_props_add(mc->compat_props, hw_compat_2_1, hw_compat_2_1_len);
5269 }
5270 DEFINE_SPAPR_MACHINE(2_1, "2.1", false);
5271 
5272 static void spapr_machine_register_types(void)
5273 {
5274     type_register_static(&spapr_machine_info);
5275 }
5276 
5277 type_init(spapr_machine_register_types)
5278