xref: /openbmc/qemu/hw/ppc/spapr.c (revision a65afaae0fd6754a80fe8c9aad6a066fe84b537d)
/*
 * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
 *
 * Copyright (c) 2004-2007 Fabrice Bellard
 * Copyright (c) 2007 Jocelyn Mayer
 * Copyright (c) 2010 David Gibson, IBM Corporation.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "sysemu/sysemu.h"
#include "sysemu/numa.h"
#include "hw/hw.h"
#include "qemu/log.h"
#include "hw/fw-path-provider.h"
#include "elf.h"
#include "net/net.h"
#include "sysemu/device_tree.h"
#include "sysemu/block-backend.h"
#include "sysemu/cpus.h"
#include "sysemu/hw_accel.h"
#include "kvm_ppc.h"
#include "migration/migration.h"
#include "mmu-hash64.h"
#include "mmu-book3s-v3.h"
#include "qom/cpu.h"

#include "hw/boards.h"
#include "hw/ppc/ppc.h"
#include "hw/loader.h"

#include "hw/ppc/fdt.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"
#include "hw/pci-host/spapr.h"
#include "hw/ppc/xics.h"
#include "hw/pci/msi.h"

#include "hw/pci/pci.h"
#include "hw/scsi/scsi.h"
#include "hw/virtio/virtio-scsi.h"

#include "exec/address-spaces.h"
#include "hw/usb.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "trace.h"
#include "hw/nmi.h"
#include "hw/intc/intc.h"

#include "hw/compat.h"
#include "qemu/cutils.h"
#include "hw/ppc/spapr_cpu_core.h"
#include "qmp-commands.h"

#include <libfdt.h>

/* SLOF memory layout:
 *
 * The SLOF raw image is loaded at 0; it copies its romfs right below the
 * flat device-tree, then positions SLOF itself 31M below that.
 *
 * So we set FW_OVERHEAD to 40MB, which should account for all of that
 * and more.
 *
 * We load our kernel at 4M, leaving space for the SLOF initial image.
 */
#define FDT_MAX_SIZE            0x100000
#define RTAS_MAX_SIZE           0x10000
#define RTAS_MAX_ADDR           0x80000000 /* RTAS must stay below that */
#define FW_MAX_SIZE             0x400000
#define FW_FILE_NAME            "slof.bin"
#define FW_OVERHEAD             0x2800000
#define KERNEL_LOAD_ADDR        FW_MAX_SIZE

#define MIN_RMA_SLOF            128UL

#define PHANDLE_XICP            0x00001111

#define HTAB_SIZE(spapr)        (1ULL << ((spapr)->htab_shift))
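/*
 * Illustrative note: HTAB_SIZE() derives the hash page table size from the
 * machine's htab_shift, e.g. htab_shift == 24 gives a 1ULL << 24 == 16 MiB
 * HPT; see spapr_hpt_shift_for_ramsize() below for how the shift is chosen.
 */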

static ICSState *spapr_ics_create(sPAPRMachineState *spapr,
                                  const char *type_ics,
                                  int nr_irqs, Error **errp)
{
    Error *local_err = NULL;
    Object *obj;

    obj = object_new(type_ics);
    object_property_add_child(OBJECT(spapr), "ics", obj, &error_abort);
    object_property_add_const_link(obj, "xics", OBJECT(spapr), &error_abort);
    object_property_set_int(obj, nr_irqs, "nr-irqs", &local_err);
    if (local_err) {
        goto error;
    }
    object_property_set_bool(obj, true, "realized", &local_err);
    if (local_err) {
        goto error;
    }

    return ICS_SIMPLE(obj);

error:
    error_propagate(errp, local_err);
    return NULL;
}

static void xics_system_init(MachineState *machine, int nr_irqs, Error **errp)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(machine);

    if (kvm_enabled()) {
        if (machine_kernel_irqchip_allowed(machine) &&
            !xics_kvm_init(spapr, errp)) {
            spapr->icp_type = TYPE_KVM_ICP;
            spapr->ics = spapr_ics_create(spapr, TYPE_ICS_KVM, nr_irqs, errp);
        }
        if (machine_kernel_irqchip_required(machine) && !spapr->ics) {
            error_prepend(errp, "kernel_irqchip requested but unavailable: ");
            return;
        }
    }

    if (!spapr->ics) {
        xics_spapr_init(spapr);
        spapr->icp_type = TYPE_ICP;
        spapr->ics = spapr_ics_create(spapr, TYPE_ICS_SIMPLE, nr_irqs, errp);
        if (!spapr->ics) {
            return;
        }
    }
}

static int spapr_fixup_cpu_smt_dt(void *fdt, int offset, PowerPCCPU *cpu,
                                  int smt_threads)
{
    int i, ret = 0;
    uint32_t servers_prop[smt_threads];
    uint32_t gservers_prop[smt_threads * 2];
    int index = ppc_get_vcpu_dt_id(cpu);

    if (cpu->compat_pvr) {
        ret = fdt_setprop_cell(fdt, offset, "cpu-version", cpu->compat_pvr);
        if (ret < 0) {
            return ret;
        }
    }

    /* Build interrupt servers and gservers properties */
    for (i = 0; i < smt_threads; i++) {
        servers_prop[i] = cpu_to_be32(index + i);
        /* Hack, direct the group queues back to cpu 0 */
        gservers_prop[i*2] = cpu_to_be32(index + i);
        gservers_prop[i*2 + 1] = 0;
    }
    ret = fdt_setprop(fdt, offset, "ibm,ppc-interrupt-server#s",
                      servers_prop, sizeof(servers_prop));
    if (ret < 0) {
        return ret;
    }
    ret = fdt_setprop(fdt, offset, "ibm,ppc-interrupt-gserver#s",
                      gservers_prop, sizeof(gservers_prop));

    return ret;
}
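/*
 * Example (illustrative): for a CPU with device-tree id 8 and
 * smt_threads == 2, the loop above yields
 *   ibm,ppc-interrupt-server#s  = <8 9>
 *   ibm,ppc-interrupt-gserver#s = <8 0 9 0>
 * i.e. one server per thread, with the second cell of each gserver pair
 * hardwired to 0 per the "group queues" hack noted above.
 */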

static int spapr_fixup_cpu_numa_dt(void *fdt, int offset, PowerPCCPU *cpu)
{
    int index = ppc_get_vcpu_dt_id(cpu);
    uint32_t associativity[] = {cpu_to_be32(0x5),
                                cpu_to_be32(0x0),
                                cpu_to_be32(0x0),
                                cpu_to_be32(0x0),
                                cpu_to_be32(cpu->node_id),
                                cpu_to_be32(index)};

    /* Advertise NUMA via ibm,associativity */
    return fdt_setprop(fdt, offset, "ibm,associativity", associativity,
                          sizeof(associativity));
}

/* Populate the "ibm,pa-features" property */
static void spapr_populate_pa_features(CPUPPCState *env, void *fdt, int offset,
                                      bool legacy_guest)
{
    uint8_t pa_features_206[] = { 6, 0,
        0xf6, 0x1f, 0xc7, 0x00, 0x80, 0xc0 };
    uint8_t pa_features_207[] = { 24, 0,
        0xf6, 0x1f, 0xc7, 0xc0, 0x80, 0xf0,
        0x80, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x80, 0x00,
        0x80, 0x00, 0x80, 0x00, 0x00, 0x00 };
    uint8_t pa_features_300[] = { 66, 0,
        /* 0: MMU|FPU|SLB|RUN|DABR|NX, 1: fri[nzpm]|DABRX|SPRG3|SLB0|PP110 */
        /* 2: VPM|DS205|PPR|DS202|DS206, 3: LSD|URG, SSO, 5: LE|CFAR|EB|LSQ */
        0xf6, 0x1f, 0xc7, 0xc0, 0x80, 0xf0, /* 0 - 5 */
        /* 6: DS207 */
        0x80, 0x00, 0x00, 0x00, 0x00, 0x00, /* 6 - 11 */
        /* 16: Vector */
        0x00, 0x00, 0x00, 0x00, 0x80, 0x00, /* 12 - 17 */
        /* 18: Vec. Scalar, 20: Vec. XOR, 22: HTM */
        0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 18 - 23 */
        /* 24: Ext. Dec, 26: 64 bit ftrs, 28: PM ftrs */
        0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 24 - 29 */
        /* 30: MMR, 32: LE atomic, 34: EBB + ext EBB */
        0x80, 0x00, 0x80, 0x00, 0xC0, 0x00, /* 30 - 35 */
        /* 36: SPR SO, 38: Copy/Paste, 40: Radix MMU */
        0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 36 - 41 */
        /* 42: PM, 44: PC RA, 46: SC vec'd */
        0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 42 - 47 */
        /* 48: SIMD, 50: QP BFP, 52: String */
        0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 48 - 53 */
        /* 54: DecFP, 56: DecI, 58: SHA */
        0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 54 - 59 */
        /* 60: NM atomic, 62: RNG */
        0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 60 - 65 */
    };
    uint8_t *pa_features;
    size_t pa_size;

    switch (POWERPC_MMU_VER(env->mmu_model)) {
    case POWERPC_MMU_VER_2_06:
        pa_features = pa_features_206;
        pa_size = sizeof(pa_features_206);
        break;
    case POWERPC_MMU_VER_2_07:
        pa_features = pa_features_207;
        pa_size = sizeof(pa_features_207);
        break;
    case POWERPC_MMU_VER_3_00:
        pa_features = pa_features_300;
        pa_size = sizeof(pa_features_300);
        break;
    default:
        return;
    }

    if (env->ci_large_pages) {
        /*
         * Note: we keep CI large pages off by default because a 64K capable
         * guest provisioned with large pages might otherwise try to map a qemu
         * framebuffer (or other kind of memory mapped PCI BAR) using 64K pages
         * even if that qemu runs on a 4k host.
         * We add this bit back here if we are confident this is not an issue.
         */
        pa_features[3] |= 0x20;
    }
    if (kvmppc_has_cap_htm() && pa_size > 24) {
        pa_features[24] |= 0x80;    /* Transactional memory support */
    }
    if (legacy_guest && pa_size > 40) {
        /* Workaround for broken kernels that attempt (guest) radix
         * mode when they can't handle it, if they see the radix bit set
         * in pa-features. So hide it from them. */
        pa_features[40 + 2] &= ~0x80; /* Radix MMU */
    }

    _FDT((fdt_setprop(fdt, offset, "ibm,pa-features", pa_features, pa_size)));
}

static int spapr_fixup_cpu_dt(void *fdt, sPAPRMachineState *spapr)
{
    int ret = 0, offset, cpus_offset;
    CPUState *cs;
    char cpu_model[32];
    int smt = kvmppc_smt_threads();
    uint32_t pft_size_prop[] = {0, cpu_to_be32(spapr->htab_shift)};

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        CPUPPCState *env = &cpu->env;
        DeviceClass *dc = DEVICE_GET_CLASS(cs);
        int index = ppc_get_vcpu_dt_id(cpu);
        int compat_smt = MIN(smp_threads, ppc_compat_max_threads(cpu));

        if ((index % smt) != 0) {
            continue;
        }

        snprintf(cpu_model, 32, "%s@%x", dc->fw_name, index);

        cpus_offset = fdt_path_offset(fdt, "/cpus");
        if (cpus_offset < 0) {
            cpus_offset = fdt_add_subnode(fdt, fdt_path_offset(fdt, "/"),
                                          "cpus");
            if (cpus_offset < 0) {
                return cpus_offset;
            }
        }
        offset = fdt_subnode_offset(fdt, cpus_offset, cpu_model);
        if (offset < 0) {
            offset = fdt_add_subnode(fdt, cpus_offset, cpu_model);
            if (offset < 0) {
                return offset;
            }
        }

        ret = fdt_setprop(fdt, offset, "ibm,pft-size",
                          pft_size_prop, sizeof(pft_size_prop));
        if (ret < 0) {
            return ret;
        }

        if (nb_numa_nodes > 1) {
            ret = spapr_fixup_cpu_numa_dt(fdt, offset, cpu);
            if (ret < 0) {
                return ret;
            }
        }

        ret = spapr_fixup_cpu_smt_dt(fdt, offset, cpu, compat_smt);
        if (ret < 0) {
            return ret;
        }

        spapr_populate_pa_features(env, fdt, offset,
                                         spapr->cas_legacy_guest_workaround);
    }
    return ret;
}

static hwaddr spapr_node0_size(void)
{
    MachineState *machine = MACHINE(qdev_get_machine());

    if (nb_numa_nodes) {
        int i;
        for (i = 0; i < nb_numa_nodes; ++i) {
            if (numa_info[i].node_mem) {
                return MIN(pow2floor(numa_info[i].node_mem),
                           machine->ram_size);
            }
        }
    }
    return machine->ram_size;
}

static void add_str(GString *s, const gchar *s1)
{
    g_string_append_len(s, s1, strlen(s1) + 1);
}

static int spapr_populate_memory_node(void *fdt, int nodeid, hwaddr start,
                                       hwaddr size)
{
    uint32_t associativity[] = {
        cpu_to_be32(0x4), /* length */
        cpu_to_be32(0x0), cpu_to_be32(0x0),
        cpu_to_be32(0x0), cpu_to_be32(nodeid)
    };
    char mem_name[32];
    uint64_t mem_reg_property[2];
    int off;

    mem_reg_property[0] = cpu_to_be64(start);
    mem_reg_property[1] = cpu_to_be64(size);

    sprintf(mem_name, "memory@" TARGET_FMT_lx, start);
    off = fdt_add_subnode(fdt, 0, mem_name);
    _FDT(off);
    _FDT((fdt_setprop_string(fdt, off, "device_type", "memory")));
    _FDT((fdt_setprop(fdt, off, "reg", mem_reg_property,
                      sizeof(mem_reg_property))));
    _FDT((fdt_setprop(fdt, off, "ibm,associativity", associativity,
                      sizeof(associativity))));
    return off;
}

static int spapr_populate_memory(sPAPRMachineState *spapr, void *fdt)
{
    MachineState *machine = MACHINE(spapr);
    hwaddr mem_start, node_size;
    int i, nb_nodes = nb_numa_nodes;
    NodeInfo *nodes = numa_info;
    NodeInfo ramnode;

    /* No NUMA nodes, assume there is just one node with whole RAM */
    if (!nb_numa_nodes) {
        nb_nodes = 1;
        ramnode.node_mem = machine->ram_size;
        nodes = &ramnode;
    }

    for (i = 0, mem_start = 0; i < nb_nodes; ++i) {
        if (!nodes[i].node_mem) {
            continue;
        }
        if (mem_start >= machine->ram_size) {
            node_size = 0;
        } else {
            node_size = nodes[i].node_mem;
            if (node_size > machine->ram_size - mem_start) {
                node_size = machine->ram_size - mem_start;
            }
        }
        if (!mem_start) {
            /* ppc_spapr_init() checks for rma_size <= node0_size already */
            spapr_populate_memory_node(fdt, i, 0, spapr->rma_size);
            mem_start += spapr->rma_size;
            node_size -= spapr->rma_size;
        }
        for ( ; node_size; ) {
            hwaddr sizetmp = pow2floor(node_size);

            /* mem_start != 0 here */
            if (ctzl(mem_start) < ctzl(sizetmp)) {
                sizetmp = 1ULL << ctzl(mem_start);
            }

            spapr_populate_memory_node(fdt, i, mem_start, sizetmp);
            node_size -= sizetmp;
            mem_start += sizetmp;
        }
    }

    return 0;
}
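/*
 * Worked example (illustrative): the inner loop above carves each node into
 * power-of-two sized, naturally aligned memory@ nodes. With mem_start =
 * 0x60000000 and node_size = 0x60000000 (1.5 GiB), pow2floor() proposes
 * 1 GiB, but ctzl(mem_start) == 29 caps the chunk at 512 MiB; the node is
 * emitted as memory@60000000 (512 MiB) followed by memory@80000000 (1 GiB).
 */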

static void spapr_populate_cpu_dt(CPUState *cs, void *fdt, int offset,
                                  sPAPRMachineState *spapr)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs);
    int index = ppc_get_vcpu_dt_id(cpu);
    uint32_t segs[] = {cpu_to_be32(28), cpu_to_be32(40),
                       0xffffffff, 0xffffffff};
    uint32_t tbfreq = kvm_enabled() ? kvmppc_get_tbfreq()
        : SPAPR_TIMEBASE_FREQ;
    uint32_t cpufreq = kvm_enabled() ? kvmppc_get_clockfreq() : 1000000000;
    uint32_t page_sizes_prop[64];
    size_t page_sizes_prop_size;
    uint32_t vcpus_per_socket = smp_threads * smp_cores;
    uint32_t pft_size_prop[] = {0, cpu_to_be32(spapr->htab_shift)};
    int compat_smt = MIN(smp_threads, ppc_compat_max_threads(cpu));
    sPAPRDRConnector *drc;
    sPAPRDRConnectorClass *drck;
    int drc_index;
    uint32_t radix_AP_encodings[PPC_PAGE_SIZES_MAX_SZ];
    int i;

    drc = spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_CPU, index);
    if (drc) {
        drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
        drc_index = drck->get_index(drc);
        _FDT((fdt_setprop_cell(fdt, offset, "ibm,my-drc-index", drc_index)));
    }

    _FDT((fdt_setprop_cell(fdt, offset, "reg", index)));
    _FDT((fdt_setprop_string(fdt, offset, "device_type", "cpu")));

    _FDT((fdt_setprop_cell(fdt, offset, "cpu-version", env->spr[SPR_PVR])));
    _FDT((fdt_setprop_cell(fdt, offset, "d-cache-block-size",
                           env->dcache_line_size)));
    _FDT((fdt_setprop_cell(fdt, offset, "d-cache-line-size",
                           env->dcache_line_size)));
    _FDT((fdt_setprop_cell(fdt, offset, "i-cache-block-size",
                           env->icache_line_size)));
    _FDT((fdt_setprop_cell(fdt, offset, "i-cache-line-size",
                           env->icache_line_size)));

    if (pcc->l1_dcache_size) {
        _FDT((fdt_setprop_cell(fdt, offset, "d-cache-size",
                               pcc->l1_dcache_size)));
    } else {
        error_report("Warning: Unknown L1 dcache size for cpu");
    }
    if (pcc->l1_icache_size) {
        _FDT((fdt_setprop_cell(fdt, offset, "i-cache-size",
                               pcc->l1_icache_size)));
    } else {
        error_report("Warning: Unknown L1 icache size for cpu");
    }

    _FDT((fdt_setprop_cell(fdt, offset, "timebase-frequency", tbfreq)));
    _FDT((fdt_setprop_cell(fdt, offset, "clock-frequency", cpufreq)));
    _FDT((fdt_setprop_cell(fdt, offset, "slb-size", env->slb_nr)));
    _FDT((fdt_setprop_cell(fdt, offset, "ibm,slb-size", env->slb_nr)));
    _FDT((fdt_setprop_string(fdt, offset, "status", "okay")));
    _FDT((fdt_setprop(fdt, offset, "64-bit", NULL, 0)));

    if (env->spr_cb[SPR_PURR].oea_read) {
        _FDT((fdt_setprop(fdt, offset, "ibm,purr", NULL, 0)));
    }

    if (env->mmu_model & POWERPC_MMU_1TSEG) {
        _FDT((fdt_setprop(fdt, offset, "ibm,processor-segment-sizes",
                          segs, sizeof(segs))));
    }

    /* Advertise VMX/VSX (vector extensions) if available
     *   0 / no property == no vector extensions
     *   1               == VMX / Altivec available
     *   2               == VSX available */
    if (env->insns_flags & PPC_ALTIVEC) {
        uint32_t vmx = (env->insns_flags2 & PPC2_VSX) ? 2 : 1;

        _FDT((fdt_setprop_cell(fdt, offset, "ibm,vmx", vmx)));
    }

    /* Advertise DFP (Decimal Floating Point) if available
     *   0 / no property == no DFP
     *   1               == DFP available */
    if (env->insns_flags2 & PPC2_DFP) {
        _FDT((fdt_setprop_cell(fdt, offset, "ibm,dfp", 1)));
    }

    page_sizes_prop_size = ppc_create_page_sizes_prop(env, page_sizes_prop,
                                                  sizeof(page_sizes_prop));
    if (page_sizes_prop_size) {
        _FDT((fdt_setprop(fdt, offset, "ibm,segment-page-sizes",
                          page_sizes_prop, page_sizes_prop_size)));
    }

    spapr_populate_pa_features(env, fdt, offset, false);

    _FDT((fdt_setprop_cell(fdt, offset, "ibm,chip-id",
                           cs->cpu_index / vcpus_per_socket)));

    _FDT((fdt_setprop(fdt, offset, "ibm,pft-size",
                      pft_size_prop, sizeof(pft_size_prop))));

    if (nb_numa_nodes > 1) {
        _FDT(spapr_fixup_cpu_numa_dt(fdt, offset, cpu));
    }

    _FDT(spapr_fixup_cpu_smt_dt(fdt, offset, cpu, compat_smt));

    if (pcc->radix_page_info) {
        for (i = 0; i < pcc->radix_page_info->count; i++) {
            radix_AP_encodings[i] =
                cpu_to_be32(pcc->radix_page_info->entries[i]);
        }
        _FDT((fdt_setprop(fdt, offset, "ibm,processor-radix-AP-encodings",
                          radix_AP_encodings,
                          pcc->radix_page_info->count *
                          sizeof(radix_AP_encodings[0]))));
    }
}

static void spapr_populate_cpus_dt_node(void *fdt, sPAPRMachineState *spapr)
{
    CPUState *cs;
    int cpus_offset;
    char *nodename;
    int smt = kvmppc_smt_threads();

    cpus_offset = fdt_add_subnode(fdt, 0, "cpus");
    _FDT(cpus_offset);
    _FDT((fdt_setprop_cell(fdt, cpus_offset, "#address-cells", 0x1)));
    _FDT((fdt_setprop_cell(fdt, cpus_offset, "#size-cells", 0x0)));

    /*
     * We walk the CPUs in reverse order to ensure that CPU DT nodes
     * created by fdt_add_subnode() end up in the right order in the FDT
     * so that the guest kernel enumerates the CPUs correctly.
     */
    CPU_FOREACH_REVERSE(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        int index = ppc_get_vcpu_dt_id(cpu);
        DeviceClass *dc = DEVICE_GET_CLASS(cs);
        int offset;

        if ((index % smt) != 0) {
            continue;
        }

        nodename = g_strdup_printf("%s@%x", dc->fw_name, index);
        offset = fdt_add_subnode(fdt, cpus_offset, nodename);
        g_free(nodename);
        _FDT(offset);
        spapr_populate_cpu_dt(cs, fdt, offset, spapr);
    }
}

/*
 * Adds ibm,dynamic-reconfiguration-memory node.
 * Refer to docs/specs/ppc-spapr-hotplug.txt for the documentation
 * of this device tree node.
 */
static int spapr_populate_drconf_memory(sPAPRMachineState *spapr, void *fdt)
{
    MachineState *machine = MACHINE(spapr);
    int ret, i, offset;
    uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE;
    uint32_t prop_lmb_size[] = {0, cpu_to_be32(lmb_size)};
    uint32_t hotplug_lmb_start = spapr->hotplug_memory.base / lmb_size;
    uint32_t nr_lmbs = (spapr->hotplug_memory.base +
                       memory_region_size(&spapr->hotplug_memory.mr)) /
                       lmb_size;
    uint32_t *int_buf, *cur_index, buf_len;
    int nr_nodes = nb_numa_nodes ? nb_numa_nodes : 1;

    /*
     * Don't create the node if there is no hotpluggable memory
     */
    if (machine->ram_size == machine->maxram_size) {
        return 0;
    }

    /*
     * Allocate enough buffer size to fit in ibm,dynamic-memory
     * or ibm,associativity-lookup-arrays
     */
    buf_len = MAX(nr_lmbs * SPAPR_DR_LMB_LIST_ENTRY_SIZE + 1, nr_nodes * 4 + 2)
              * sizeof(uint32_t);
    cur_index = int_buf = g_malloc0(buf_len);

    offset = fdt_add_subnode(fdt, 0, "ibm,dynamic-reconfiguration-memory");

    ret = fdt_setprop(fdt, offset, "ibm,lmb-size", prop_lmb_size,
                    sizeof(prop_lmb_size));
    if (ret < 0) {
        goto out;
    }

    ret = fdt_setprop_cell(fdt, offset, "ibm,memory-flags-mask", 0xff);
    if (ret < 0) {
        goto out;
    }

    ret = fdt_setprop_cell(fdt, offset, "ibm,memory-preservation-time", 0x0);
    if (ret < 0) {
        goto out;
    }

    /* ibm,dynamic-memory */
    int_buf[0] = cpu_to_be32(nr_lmbs);
    cur_index++;
    for (i = 0; i < nr_lmbs; i++) {
        uint64_t addr = i * lmb_size;
        uint32_t *dynamic_memory = cur_index;

        if (i >= hotplug_lmb_start) {
            sPAPRDRConnector *drc;
            sPAPRDRConnectorClass *drck;

            drc = spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_LMB, i);
            g_assert(drc);
            drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);

            dynamic_memory[0] = cpu_to_be32(addr >> 32);
            dynamic_memory[1] = cpu_to_be32(addr & 0xffffffff);
            dynamic_memory[2] = cpu_to_be32(drck->get_index(drc));
            dynamic_memory[3] = cpu_to_be32(0); /* reserved */
            dynamic_memory[4] = cpu_to_be32(numa_get_node(addr, NULL));
            if (memory_region_present(get_system_memory(), addr)) {
                dynamic_memory[5] = cpu_to_be32(SPAPR_LMB_FLAGS_ASSIGNED);
            } else {
                dynamic_memory[5] = cpu_to_be32(0);
            }
        } else {
            /*
             * LMB information for the RMA, boot time RAM and the gap between
             * RAM and the hotplug memory region -- all these are marked as
             * reserved and as having no valid DRC.
             */
            dynamic_memory[0] = cpu_to_be32(addr >> 32);
            dynamic_memory[1] = cpu_to_be32(addr & 0xffffffff);
            dynamic_memory[2] = cpu_to_be32(0);
            dynamic_memory[3] = cpu_to_be32(0); /* reserved */
            dynamic_memory[4] = cpu_to_be32(-1);
            dynamic_memory[5] = cpu_to_be32(SPAPR_LMB_FLAGS_RESERVED |
                                            SPAPR_LMB_FLAGS_DRC_INVALID);
        }

        cur_index += SPAPR_DR_LMB_LIST_ENTRY_SIZE;
    }
    ret = fdt_setprop(fdt, offset, "ibm,dynamic-memory", int_buf, buf_len);
    if (ret < 0) {
        goto out;
    }

    /* ibm,associativity-lookup-arrays */
    cur_index = int_buf;
    int_buf[0] = cpu_to_be32(nr_nodes);
    int_buf[1] = cpu_to_be32(4); /* Number of entries per associativity list */
    cur_index += 2;
    for (i = 0; i < nr_nodes; i++) {
        uint32_t associativity[] = {
            cpu_to_be32(0x0),
            cpu_to_be32(0x0),
            cpu_to_be32(0x0),
            cpu_to_be32(i)
        };
        memcpy(cur_index, associativity, sizeof(associativity));
        cur_index += 4;
    }
    ret = fdt_setprop(fdt, offset, "ibm,associativity-lookup-arrays", int_buf,
            (cur_index - int_buf) * sizeof(uint32_t));
out:
    g_free(int_buf);
    return ret;
}
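/*
 * Illustrative summary of the ibm,dynamic-memory property built above: one
 * leading cell holds nr_lmbs, then each LMB contributes a six-cell entry
 * (SPAPR_DR_LMB_LIST_ENTRY_SIZE) -- address hi, address lo, DRC index,
 * a reserved cell, the NUMA node, and the SPAPR_LMB_FLAGS_* flags word.
 */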

static int spapr_dt_cas_updates(sPAPRMachineState *spapr, void *fdt,
                                sPAPROptionVector *ov5_updates)
{
    sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
    int ret = 0, offset;

    /* Generate ibm,dynamic-reconfiguration-memory node if required */
    if (spapr_ovec_test(ov5_updates, OV5_DRCONF_MEMORY)) {
        g_assert(smc->dr_lmb_enabled);
        ret = spapr_populate_drconf_memory(spapr, fdt);
        if (ret) {
            goto out;
        }
    }

    offset = fdt_path_offset(fdt, "/chosen");
    if (offset < 0) {
        offset = fdt_add_subnode(fdt, 0, "chosen");
        if (offset < 0) {
            return offset;
        }
    }
    ret = spapr_ovec_populate_dt(fdt, offset, spapr->ov5_cas,
                                 "ibm,architecture-vec-5");

out:
    return ret;
}

int spapr_h_cas_compose_response(sPAPRMachineState *spapr,
                                 target_ulong addr, target_ulong size,
                                 sPAPROptionVector *ov5_updates)
{
    void *fdt, *fdt_skel;
    sPAPRDeviceTreeUpdateHeader hdr = { .version_id = 1 };

    size -= sizeof(hdr);

    /* Create skeleton */
    fdt_skel = g_malloc0(size);
    _FDT((fdt_create(fdt_skel, size)));
    _FDT((fdt_begin_node(fdt_skel, "")));
    _FDT((fdt_end_node(fdt_skel)));
    _FDT((fdt_finish(fdt_skel)));
    fdt = g_malloc0(size);
    _FDT((fdt_open_into(fdt_skel, fdt, size)));
    g_free(fdt_skel);

    /* Fixup cpu nodes */
    _FDT((spapr_fixup_cpu_dt(fdt, spapr)));

    if (spapr_dt_cas_updates(spapr, fdt, ov5_updates)) {
        return -1;
    }

    /* Pack resulting tree */
    _FDT((fdt_pack(fdt)));

    if (fdt_totalsize(fdt) + sizeof(hdr) > size) {
        trace_spapr_cas_failed(size);
        return -1;
    }

    cpu_physical_memory_write(addr, &hdr, sizeof(hdr));
    cpu_physical_memory_write(addr + sizeof(hdr), fdt, fdt_totalsize(fdt));
    trace_spapr_cas_continue(fdt_totalsize(fdt) + sizeof(hdr));
    g_free(fdt);

    return 0;
}
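/*
 * Illustrative note: the CAS response written to guest memory above is the
 * sPAPRDeviceTreeUpdateHeader immediately followed by the packed FDT, which
 * is why sizeof(hdr) is carved out of the guest-supplied buffer size before
 * the skeleton tree is built.
 */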

static void spapr_dt_rtas(sPAPRMachineState *spapr, void *fdt)
{
    int rtas;
    GString *hypertas = g_string_sized_new(256);
    GString *qemu_hypertas = g_string_sized_new(256);
    uint32_t refpoints[] = { cpu_to_be32(0x4), cpu_to_be32(0x4) };
    uint64_t max_hotplug_addr = spapr->hotplug_memory.base +
        memory_region_size(&spapr->hotplug_memory.mr);
    uint32_t lrdr_capacity[] = {
        cpu_to_be32(max_hotplug_addr >> 32),
        cpu_to_be32(max_hotplug_addr & 0xffffffff),
        0, cpu_to_be32(SPAPR_MEMORY_BLOCK_SIZE),
        cpu_to_be32(max_cpus / smp_threads),
    };

    _FDT(rtas = fdt_add_subnode(fdt, 0, "rtas"));

    /* hypertas */
    add_str(hypertas, "hcall-pft");
    add_str(hypertas, "hcall-term");
    add_str(hypertas, "hcall-dabr");
    add_str(hypertas, "hcall-interrupt");
    add_str(hypertas, "hcall-tce");
    add_str(hypertas, "hcall-vio");
    add_str(hypertas, "hcall-splpar");
    add_str(hypertas, "hcall-bulk");
    add_str(hypertas, "hcall-set-mode");
    add_str(hypertas, "hcall-sprg0");
    add_str(hypertas, "hcall-copy");
    add_str(hypertas, "hcall-debug");
    add_str(qemu_hypertas, "hcall-memop1");

    if (!kvm_enabled() || kvmppc_spapr_use_multitce()) {
        add_str(hypertas, "hcall-multi-tce");
    }
    _FDT(fdt_setprop(fdt, rtas, "ibm,hypertas-functions",
                     hypertas->str, hypertas->len));
    g_string_free(hypertas, TRUE);
    _FDT(fdt_setprop(fdt, rtas, "qemu,hypertas-functions",
                     qemu_hypertas->str, qemu_hypertas->len));
    g_string_free(qemu_hypertas, TRUE);

    _FDT(fdt_setprop(fdt, rtas, "ibm,associativity-reference-points",
                     refpoints, sizeof(refpoints)));

    _FDT(fdt_setprop_cell(fdt, rtas, "rtas-error-log-max",
                          RTAS_ERROR_LOG_MAX));
    _FDT(fdt_setprop_cell(fdt, rtas, "rtas-event-scan-rate",
                          RTAS_EVENT_SCAN_RATE));

    if (msi_nonbroken) {
        _FDT(fdt_setprop(fdt, rtas, "ibm,change-msix-capable", NULL, 0));
    }

    /*
     * According to PAPR, the RTAS ibm,os-term call does not guarantee a
     * return to the guest cpu.
     *
     * The additional ibm,extended-os-term property indicates that the
     * RTAS call will always return. Set this property.
     */
    _FDT(fdt_setprop(fdt, rtas, "ibm,extended-os-term", NULL, 0));

    _FDT(fdt_setprop(fdt, rtas, "ibm,lrdr-capacity",
                     lrdr_capacity, sizeof(lrdr_capacity)));

    spapr_dt_rtas_tokens(fdt, rtas);
}

/* Prepare ibm,arch-vec-5-platform-support, which indicates the MMU features
 * that the guest may request and thus the valid values for bytes 24..26 of
 * option vector 5: */
static void spapr_dt_ov5_platform_support(void *fdt, int chosen)
{
    PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu);

    char val[2 * 3] = {
        24, 0x00, /* Hash/Radix, filled in below. */
        25, 0x00, /* Hash options: Segment Tables == no, GTSE == no. */
        26, 0x40, /* Radix options: GTSE == yes. */
    };

    if (kvm_enabled()) {
        if (kvmppc_has_cap_mmu_radix() && kvmppc_has_cap_mmu_hash_v3()) {
            val[1] = 0x80; /* OV5_MMU_BOTH */
        } else if (kvmppc_has_cap_mmu_radix()) {
            val[1] = 0x40; /* OV5_MMU_RADIX_300 */
        } else {
            val[1] = 0x00; /* Hash */
        }
    } else {
        if (first_ppc_cpu->env.mmu_model & POWERPC_MMU_V3) {
            /* V3 MMU supports both hash and radix (with dynamic switching) */
            val[1] = 0xC0;
        } else {
            /* Otherwise we can only do hash */
            val[1] = 0x00;
        }
    }
    _FDT(fdt_setprop(fdt, chosen, "ibm,arch-vec-5-platform-support",
                     val, sizeof(val)));
}
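/*
 * Example (illustrative): each pair in val is {option vector 5 byte index,
 * value}, so on a KVM host exposing both MMU capabilities the property
 * reads <24 0x80 25 0x00 26 0x40>: hash or radix permitted, no segment
 * tables and no GTSE for hash, GTSE enabled for radix.
 */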

static void spapr_dt_chosen(sPAPRMachineState *spapr, void *fdt)
{
    MachineState *machine = MACHINE(spapr);
    int chosen;
    const char *boot_device = machine->boot_order;
    char *stdout_path = spapr_vio_stdout_path(spapr->vio_bus);
    size_t cb = 0;
    char *bootlist = get_boot_devices_list(&cb, true);

    _FDT(chosen = fdt_add_subnode(fdt, 0, "chosen"));

    _FDT(fdt_setprop_string(fdt, chosen, "bootargs", machine->kernel_cmdline));
    _FDT(fdt_setprop_cell(fdt, chosen, "linux,initrd-start",
                          spapr->initrd_base));
    _FDT(fdt_setprop_cell(fdt, chosen, "linux,initrd-end",
                          spapr->initrd_base + spapr->initrd_size));

    if (spapr->kernel_size) {
        uint64_t kprop[2] = { cpu_to_be64(KERNEL_LOAD_ADDR),
                              cpu_to_be64(spapr->kernel_size) };

        _FDT(fdt_setprop(fdt, chosen, "qemu,boot-kernel",
                         &kprop, sizeof(kprop)));
        if (spapr->kernel_le) {
            _FDT(fdt_setprop(fdt, chosen, "qemu,boot-kernel-le", NULL, 0));
        }
    }
    if (boot_menu) {
        _FDT((fdt_setprop_cell(fdt, chosen, "qemu,boot-menu", boot_menu)));
    }
    _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-width", graphic_width));
    _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-height", graphic_height));
    _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-depth", graphic_depth));

    if (cb && bootlist) {
        int i;

        for (i = 0; i < cb; i++) {
            if (bootlist[i] == '\n') {
                bootlist[i] = ' ';
            }
        }
        _FDT(fdt_setprop_string(fdt, chosen, "qemu,boot-list", bootlist));
    }

    if (boot_device && strlen(boot_device)) {
        _FDT(fdt_setprop_string(fdt, chosen, "qemu,boot-device", boot_device));
    }

    if (!spapr->has_graphics && stdout_path) {
        _FDT(fdt_setprop_string(fdt, chosen, "linux,stdout-path", stdout_path));
    }

    spapr_dt_ov5_platform_support(fdt, chosen);

    g_free(stdout_path);
    g_free(bootlist);
}

static void spapr_dt_hypervisor(sPAPRMachineState *spapr, void *fdt)
{
    /* The /hypervisor node isn't in PAPR - this is a hack to allow PR
     * KVM to work under pHyp with some guest co-operation */
    int hypervisor;
    uint8_t hypercall[16];

    _FDT(hypervisor = fdt_add_subnode(fdt, 0, "hypervisor"));
    /* indicate KVM hypercall interface */
    _FDT(fdt_setprop_string(fdt, hypervisor, "compatible", "linux,kvm"));
    if (kvmppc_has_cap_fixup_hcalls()) {
        /*
         * Older KVM versions with older guest kernels were broken
         * with the magic page, don't allow the guest to map it.
         */
        if (!kvmppc_get_hypercall(first_cpu->env_ptr, hypercall,
                                  sizeof(hypercall))) {
            _FDT(fdt_setprop(fdt, hypervisor, "hcall-instructions",
                             hypercall, sizeof(hypercall)));
        }
    }
}

static void *spapr_build_fdt(sPAPRMachineState *spapr,
                             hwaddr rtas_addr,
                             hwaddr rtas_size)
{
    MachineState *machine = MACHINE(qdev_get_machine());
    MachineClass *mc = MACHINE_GET_CLASS(machine);
    sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);
    int ret;
    void *fdt;
    sPAPRPHBState *phb;
    char *buf;
    int smt = kvmppc_smt_threads();

    fdt = g_malloc0(FDT_MAX_SIZE);
    _FDT((fdt_create_empty_tree(fdt, FDT_MAX_SIZE)));

    /* Root node */
    _FDT(fdt_setprop_string(fdt, 0, "device_type", "chrp"));
    _FDT(fdt_setprop_string(fdt, 0, "model", "IBM pSeries (emulated by qemu)"));
    _FDT(fdt_setprop_string(fdt, 0, "compatible", "qemu,pseries"));

    /*
     * Add info to the guest to identify which host it is running on
     * and what the UUID of the guest is
     */
    if (kvmppc_get_host_model(&buf)) {
        _FDT(fdt_setprop_string(fdt, 0, "host-model", buf));
        g_free(buf);
    }
    if (kvmppc_get_host_serial(&buf)) {
        _FDT(fdt_setprop_string(fdt, 0, "host-serial", buf));
        g_free(buf);
    }

    buf = qemu_uuid_unparse_strdup(&qemu_uuid);

    _FDT(fdt_setprop_string(fdt, 0, "vm,uuid", buf));
    if (qemu_uuid_set) {
        _FDT(fdt_setprop_string(fdt, 0, "system-id", buf));
    }
    g_free(buf);

    if (qemu_get_vm_name()) {
        _FDT(fdt_setprop_string(fdt, 0, "ibm,partition-name",
                                qemu_get_vm_name()));
    }

    _FDT(fdt_setprop_cell(fdt, 0, "#address-cells", 2));
    _FDT(fdt_setprop_cell(fdt, 0, "#size-cells", 2));

    /* /interrupt controller */
    spapr_dt_xics(DIV_ROUND_UP(max_cpus * smt, smp_threads), fdt, PHANDLE_XICP);

    ret = spapr_populate_memory(spapr, fdt);
    if (ret < 0) {
        error_report("couldn't setup memory nodes in fdt");
        exit(1);
    }

    /* /vdevice */
    spapr_dt_vdevice(spapr->vio_bus, fdt);

    if (object_resolve_path_type("", TYPE_SPAPR_RNG, NULL)) {
        ret = spapr_rng_populate_dt(fdt);
        if (ret < 0) {
            error_report("could not set up rng device in the fdt");
            exit(1);
        }
    }

    QLIST_FOREACH(phb, &spapr->phbs, list) {
        ret = spapr_populate_pci_dt(phb, PHANDLE_XICP, fdt);
        if (ret < 0) {
            error_report("couldn't setup PCI devices in fdt");
            exit(1);
        }
    }

    /* cpus */
    spapr_populate_cpus_dt_node(fdt, spapr);

    if (smc->dr_lmb_enabled) {
        _FDT(spapr_drc_populate_dt(fdt, 0, NULL, SPAPR_DR_CONNECTOR_TYPE_LMB));
    }

    if (mc->has_hotpluggable_cpus) {
        int offset = fdt_path_offset(fdt, "/cpus");
        ret = spapr_drc_populate_dt(fdt, offset, NULL,
                                    SPAPR_DR_CONNECTOR_TYPE_CPU);
        if (ret < 0) {
            error_report("Couldn't set up CPU DR device tree properties");
            exit(1);
        }
    }

    /* /event-sources */
    spapr_dt_events(spapr, fdt);

    /* /rtas */
    spapr_dt_rtas(spapr, fdt);

    /* /chosen */
    spapr_dt_chosen(spapr, fdt);

    /* /hypervisor */
    if (kvm_enabled()) {
        spapr_dt_hypervisor(spapr, fdt);
    }

    /* Build memory reserve map */
    if (spapr->kernel_size) {
        _FDT((fdt_add_mem_rsv(fdt, KERNEL_LOAD_ADDR, spapr->kernel_size)));
    }
    if (spapr->initrd_size) {
        _FDT((fdt_add_mem_rsv(fdt, spapr->initrd_base, spapr->initrd_size)));
    }

    /* ibm,client-architecture-support updates */
    ret = spapr_dt_cas_updates(spapr, fdt, spapr->ov5_cas);
    if (ret < 0) {
        error_report("couldn't setup CAS properties fdt");
        exit(1);
    }

    return fdt;
}

static uint64_t translate_kernel_address(void *opaque, uint64_t addr)
{
    return (addr & 0x0fffffff) + KERNEL_LOAD_ADDR;
}

static void emulate_spapr_hypercall(PPCVirtualHypervisor *vhyp,
                                    PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;

    /* The TCG path should also be holding the BQL at this point */
    g_assert(qemu_mutex_iothread_locked());

    if (msr_pr) {
        hcall_dprintf("Hypercall made with MSR[PR]=1\n");
        env->gpr[3] = H_PRIVILEGE;
    } else {
        env->gpr[3] = spapr_hypercall(cpu, env->gpr[3], &env->gpr[4]);
    }
}

static uint64_t spapr_get_patbe(PPCVirtualHypervisor *vhyp)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(vhyp);

    return spapr->patb_entry;
}

#define HPTE(_table, _i)   (void *)(((uint64_t *)(_table)) + ((_i) * 2))
#define HPTE_VALID(_hpte)  (tswap64(*((uint64_t *)(_hpte))) & HPTE64_V_VALID)
#define HPTE_DIRTY(_hpte)  (tswap64(*((uint64_t *)(_hpte))) & HPTE64_V_HPTE_DIRTY)
#define CLEAN_HPTE(_hpte)  ((*(uint64_t *)(_hpte)) &= tswap64(~HPTE64_V_HPTE_DIRTY))
#define DIRTY_HPTE(_hpte)  ((*(uint64_t *)(_hpte)) |= tswap64(HPTE64_V_HPTE_DIRTY))
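/*
 * Illustrative note: each HPTE is a 16-byte pair of doublewords, hence
 * HPTE(table, i) advancing by i * 2 uint64_t slots. Judging from the htab
 * migration code below, HPTE64_V_HPTE_DIRTY serves as a QEMU-internal
 * dirty flag marking entries that still need to be saved.
 */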

/*
 * Get the fd to access the kernel htab, re-opening it if necessary
 */
static int get_htab_fd(sPAPRMachineState *spapr)
{
    if (spapr->htab_fd >= 0) {
        return spapr->htab_fd;
    }

    spapr->htab_fd = kvmppc_get_htab_fd(false);
    if (spapr->htab_fd < 0) {
        error_report("Unable to open fd for reading hash table from KVM: %s",
                     strerror(errno));
    }

    return spapr->htab_fd;
}

void close_htab_fd(sPAPRMachineState *spapr)
{
    if (spapr->htab_fd >= 0) {
        close(spapr->htab_fd);
    }
    spapr->htab_fd = -1;
}

static hwaddr spapr_hpt_mask(PPCVirtualHypervisor *vhyp)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(vhyp);

    return HTAB_SIZE(spapr) / HASH_PTEG_SIZE_64 - 1;
}

static const ppc_hash_pte64_t *spapr_map_hptes(PPCVirtualHypervisor *vhyp,
                                                hwaddr ptex, int n)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(vhyp);
    hwaddr pte_offset = ptex * HASH_PTE_SIZE_64;

    if (!spapr->htab) {
        /*
         * HTAB is controlled by KVM. Fetch into temporary buffer
         */
        ppc_hash_pte64_t *hptes = g_malloc(n * HASH_PTE_SIZE_64);
        kvmppc_read_hptes(hptes, ptex, n);
        return hptes;
    }

    /*
     * HTAB is controlled by QEMU. Just point to the internally
     * accessible PTEG.
     */
    return (const ppc_hash_pte64_t *)(spapr->htab + pte_offset);
}

static void spapr_unmap_hptes(PPCVirtualHypervisor *vhyp,
                              const ppc_hash_pte64_t *hptes,
                              hwaddr ptex, int n)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(vhyp);

    if (!spapr->htab) {
        g_free((void *)hptes);
    }

    /* Nothing to do for qemu managed HPT */
}

static void spapr_store_hpte(PPCVirtualHypervisor *vhyp, hwaddr ptex,
                             uint64_t pte0, uint64_t pte1)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(vhyp);
    hwaddr offset = ptex * HASH_PTE_SIZE_64;

    if (!spapr->htab) {
        kvmppc_write_hpte(ptex, pte0, pte1);
    } else {
        stq_p(spapr->htab + offset, pte0);
        stq_p(spapr->htab + offset + HASH_PTE_SIZE_64 / 2, pte1);
    }
}

static int spapr_hpt_shift_for_ramsize(uint64_t ramsize)
{
    int shift;

    /* We aim for a hash table of size 1/128 the size of RAM (rounded
     * up).  The PAPR recommendation is actually 1/64 of RAM size, but
     * that's much more than is needed for Linux guests */
    shift = ctz64(pow2ceil(ramsize)) - 7;
    shift = MAX(shift, 18); /* Minimum architected size */
    shift = MIN(shift, 46); /* Maximum architected size */
    return shift;
}
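/*
 * Worked example (illustrative): with ramsize = 4 GiB, pow2ceil() leaves
 * 2^32, ctz64() returns 32, and subtracting 7 gives shift = 25 -- a 32 MiB
 * HPT, i.e. 1/128 of RAM as the comment above describes.
 */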
1226 
1227 void spapr_free_hpt(sPAPRMachineState *spapr)
1228 {
1229     g_free(spapr->htab);
1230     spapr->htab = NULL;
1231     spapr->htab_shift = 0;
1232     close_htab_fd(spapr);
1233 }
1234 
1235 static void spapr_reallocate_hpt(sPAPRMachineState *spapr, int shift,
1236                                  Error **errp)
1237 {
1238     long rc;
1239 
1240     /* Clean up any HPT info from a previous boot */
1241     spapr_free_hpt(spapr);
1242 
1243     rc = kvmppc_reset_htab(shift);
1244     if (rc < 0) {
1245         /* kernel-side HPT needed, but couldn't allocate one */
1246         error_setg_errno(errp, errno,
1247                          "Failed to allocate KVM HPT of order %d (try smaller maxmem?)",
1248                          shift);
1249         /* This is almost certainly fatal, but if the caller really
1250          * wants to carry on with shift == 0, it's welcome to try */
1251     } else if (rc > 0) {
1252         /* kernel-side HPT allocated */
1253         if (rc != shift) {
1254             error_setg(errp,
1255                        "Requested order %d HPT, but kernel allocated order %ld (try smaller maxmem?)",
1256                        shift, rc);
1257         }
1258 
1259         spapr->htab_shift = shift;
1260         spapr->htab = NULL;
1261     } else {
1262         /* kernel-side HPT not needed, allocate in userspace instead */
1263         size_t size = 1ULL << shift;
1264         int i;
1265 
1266         spapr->htab = qemu_memalign(size, size);
1267         if (!spapr->htab) {
1268             error_setg_errno(errp, errno,
1269                              "Could not allocate HPT of order %d", shift);
1270             return;
1271         }
1272 
1273         memset(spapr->htab, 0, size);
1274         spapr->htab_shift = shift;
1275 
1276         for (i = 0; i < size / HASH_PTE_SIZE_64; i++) {
1277             DIRTY_HPTE(HPTE(spapr->htab, i));
1278         }
1279     }
1280 }
1281 
1282 void spapr_setup_hpt_and_vrma(sPAPRMachineState *spapr)
1283 {
1284     spapr_reallocate_hpt(spapr,
1285                      spapr_hpt_shift_for_ramsize(MACHINE(spapr)->maxram_size),
1286                      &error_fatal);
1287     if (spapr->vrma_adjust) {
1288         spapr->rma_size = kvmppc_rma_size(spapr_node0_size(),
1289                                           spapr->htab_shift);
1290     }
1291     /* We're setting up a hash table, so that means we're not radix */
1292     spapr->patb_entry = 0;
1293 }
1294 
1295 static void find_unknown_sysbus_device(SysBusDevice *sbdev, void *opaque)
1296 {
1297     bool matched = false;
1298 
1299     if (object_dynamic_cast(OBJECT(sbdev), TYPE_SPAPR_PCI_HOST_BRIDGE)) {
1300         matched = true;
1301     }
1302 
1303     if (!matched) {
1304         error_report("Device %s is not supported by this machine yet.",
1305                      qdev_fw_name(DEVICE(sbdev)));
1306         exit(1);
1307     }
1308 }
1309 
1310 static void ppc_spapr_reset(void)
1311 {
1312     MachineState *machine = MACHINE(qdev_get_machine());
1313     sPAPRMachineState *spapr = SPAPR_MACHINE(machine);
1314     PowerPCCPU *first_ppc_cpu;
1315     uint32_t rtas_limit;
1316     hwaddr rtas_addr, fdt_addr;
1317     void *fdt;
1318     int rc;
1319 
1320     /* Check for unknown sysbus devices */
1321     foreach_dynamic_sysbus_device(find_unknown_sysbus_device, NULL);
1322 
1323     if (kvm_enabled() && kvmppc_has_cap_mmu_radix()) {
1324         /* If using KVM with radix mode available, VCPUs can be started
1325          * without a HPT because KVM will start them in radix mode.
1326          * Set the GR bit in PATB so that we know there is no HPT. */
1327         spapr->patb_entry = PATBE1_GR;
1328     } else {
1329         spapr->patb_entry = 0;
1330         spapr_setup_hpt_and_vrma(spapr);
1331     }
1332 
1333     qemu_devices_reset();
1334 
1335     /*
1336      * We place the device tree and RTAS just below either the top of the RMA,
1337      * or just below 2GB, whichever is lowere, so that it can be
1338      * processed with 32-bit real mode code if necessary
1339      */
1340     rtas_limit = MIN(spapr->rma_size, RTAS_MAX_ADDR);
1341     rtas_addr = rtas_limit - RTAS_MAX_SIZE;
1342     fdt_addr = rtas_addr - FDT_MAX_SIZE;
1343 
1344     /* if this reset wasn't generated by CAS, we should reset our
1345      * negotiated options and start from scratch */
1346     if (!spapr->cas_reboot) {
1347         spapr_ovec_cleanup(spapr->ov5_cas);
1348         spapr->ov5_cas = spapr_ovec_new();
1349     }
1350 
1351     fdt = spapr_build_fdt(spapr, rtas_addr, spapr->rtas_size);
1352 
1353     spapr_load_rtas(spapr, fdt, rtas_addr);
1354 
1355     rc = fdt_pack(fdt);
1356 
1357     /* Should only fail if we've built a corrupted tree */
1358     assert(rc == 0);
1359 
1360     if (fdt_totalsize(fdt) > FDT_MAX_SIZE) {
1361         error_report("FDT too big ! 0x%x bytes (max is 0x%x)",
1362                      fdt_totalsize(fdt), FDT_MAX_SIZE);
1363         exit(1);
1364     }
1365 
1366     /* Load the fdt */
1367     qemu_fdt_dumpdtb(fdt, fdt_totalsize(fdt));
1368     cpu_physical_memory_write(fdt_addr, fdt, fdt_totalsize(fdt));
1369     g_free(fdt);
1370 
1371     /* Set up the entry state */
1372     first_ppc_cpu = POWERPC_CPU(first_cpu);
1373     first_ppc_cpu->env.gpr[3] = fdt_addr;
1374     first_ppc_cpu->env.gpr[5] = 0;
1375     first_cpu->halted = 0;
1376     first_ppc_cpu->env.nip = SPAPR_ENTRY_POINT;
1377 
1378     spapr->cas_reboot = false;
1379 }
1380 
1381 static void spapr_create_nvram(sPAPRMachineState *spapr)
1382 {
1383     DeviceState *dev = qdev_create(&spapr->vio_bus->bus, "spapr-nvram");
1384     DriveInfo *dinfo = drive_get(IF_PFLASH, 0, 0);
1385 
1386     if (dinfo) {
1387         qdev_prop_set_drive(dev, "drive", blk_by_legacy_dinfo(dinfo),
1388                             &error_fatal);
1389     }
1390 
1391     qdev_init_nofail(dev);
1392 
1393     spapr->nvram = (struct sPAPRNVRAM *)dev;
1394 }
1395 
1396 static void spapr_rtc_create(sPAPRMachineState *spapr)
1397 {
1398     object_initialize(&spapr->rtc, sizeof(spapr->rtc), TYPE_SPAPR_RTC);
1399     object_property_add_child(OBJECT(spapr), "rtc", OBJECT(&spapr->rtc),
1400                               &error_fatal);
1401     object_property_set_bool(OBJECT(&spapr->rtc), true, "realized",
1402                               &error_fatal);
1403     object_property_add_alias(OBJECT(spapr), "rtc-time", OBJECT(&spapr->rtc),
1404                               "date", &error_fatal);
1405 }
1406 
1407 /* Returns whether we want to use VGA or not */
1408 static bool spapr_vga_init(PCIBus *pci_bus, Error **errp)
1409 {
1410     switch (vga_interface_type) {
1411     case VGA_NONE:
1412         return false;
1413     case VGA_DEVICE:
1414         return true;
1415     case VGA_STD:
1416     case VGA_VIRTIO:
1417         return pci_vga_init(pci_bus) != NULL;
1418     default:
1419         error_setg(errp,
1420                    "Unsupported VGA mode, only -vga std or -vga virtio is supported");
1421         return false;
1422     }
1423 }
1424 
1425 static int spapr_post_load(void *opaque, int version_id)
1426 {
1427     sPAPRMachineState *spapr = (sPAPRMachineState *)opaque;
1428     int err = 0;
1429 
1430     if (!object_dynamic_cast(OBJECT(spapr->ics), TYPE_ICS_KVM)) {
1431         CPUState *cs;
1432         CPU_FOREACH(cs) {
1433             PowerPCCPU *cpu = POWERPC_CPU(cs);
1434             icp_resend(ICP(cpu->intc));
1435         }
1436     }
1437 
1438     /* In earlier versions, there was no separate qdev for the PAPR
1439      * RTC, so the RTC offset was stored directly in sPAPREnvironment.
1440      * So when migrating from those versions, poke the incoming offset
1441      * value into the RTC device */
1442     if (version_id < 3) {
1443         err = spapr_rtc_import_offset(&spapr->rtc, spapr->rtc_offset);
1444     }
1445 
1446     return err;
1447 }
1448 
1449 static bool version_before_3(void *opaque, int version_id)
1450 {
1451     return version_id < 3;
1452 }
1453 
1454 static bool spapr_ov5_cas_needed(void *opaque)
1455 {
1456     sPAPRMachineState *spapr = opaque;
1457     sPAPROptionVector *ov5_mask = spapr_ovec_new();
1458     sPAPROptionVector *ov5_legacy = spapr_ovec_new();
1459     sPAPROptionVector *ov5_removed = spapr_ovec_new();
1460     bool cas_needed;
1461 
1462     /* Prior to the introduction of sPAPROptionVector, we had two option
1463      * vectors we dealt with: OV5_FORM1_AFFINITY, and OV5_DRCONF_MEMORY.
1464      * Both of these options encode machine topology into the device-tree
1465      * in such a way that the now-booted OS should still be able to interact
1466      * appropriately with QEMU regardless of what options were actually
1467      * negotiatied on the source side.
1468      *
1469      * As such, we can avoid migrating the CAS-negotiated options if these
1470      * are the only options available on the current machine/platform.
1471      * Since these are the only options available for pseries-2.7 and
1472      * earlier, this allows us to maintain old->new/new->old migration
1473      * compatibility.
1474      *
1475      * For QEMU 2.8+, there are additional CAS-negotiatable options available
1476      * via default pseries-2.8 machines and explicit command-line parameters.
1477      * Some of these options, like OV5_HP_EVT, *do* require QEMU to be aware
1478      * of the actual CAS-negotiated values to continue working properly. For
1479      * example, availability of memory unplug depends on knowing whether
1480      * OV5_HP_EVT was negotiated via CAS.
1481      *
1482      * Thus, for any cases where the set of available CAS-negotiatable
1483      * options extends beyond OV5_FORM1_AFFINITY and OV5_DRCONF_MEMORY, we
1484      * include the CAS-negotiated options in the migration stream.
1485      */
1486     spapr_ovec_set(ov5_mask, OV5_FORM1_AFFINITY);
1487     spapr_ovec_set(ov5_mask, OV5_DRCONF_MEMORY);
1488 
1489     /* spapr_ovec_diff returns true if bits were removed. we avoid using
1490      * the mask itself since in the future it's possible "legacy" bits may be
1491      * removed via machine options, which could generate a false positive
1492      * that breaks migration.
1493      */
1494     spapr_ovec_intersect(ov5_legacy, spapr->ov5, ov5_mask);
1495     cas_needed = spapr_ovec_diff(ov5_removed, spapr->ov5, ov5_legacy);
1496 
1497     spapr_ovec_cleanup(ov5_mask);
1498     spapr_ovec_cleanup(ov5_legacy);
1499     spapr_ovec_cleanup(ov5_removed);
1500 
1501     return cas_needed;
1502 }
1503 
1504 static const VMStateDescription vmstate_spapr_ov5_cas = {
1505     .name = "spapr_option_vector_ov5_cas",
1506     .version_id = 1,
1507     .minimum_version_id = 1,
1508     .needed = spapr_ov5_cas_needed,
1509     .fields = (VMStateField[]) {
1510         VMSTATE_STRUCT_POINTER_V(ov5_cas, sPAPRMachineState, 1,
1511                                  vmstate_spapr_ovec, sPAPROptionVector),
1512         VMSTATE_END_OF_LIST()
1513     },
1514 };
1515 
1516 static bool spapr_patb_entry_needed(void *opaque)
1517 {
1518     sPAPRMachineState *spapr = opaque;
1519 
1520     return !!spapr->patb_entry;
1521 }
1522 
1523 static const VMStateDescription vmstate_spapr_patb_entry = {
1524     .name = "spapr_patb_entry",
1525     .version_id = 1,
1526     .minimum_version_id = 1,
1527     .needed = spapr_patb_entry_needed,
1528     .fields = (VMStateField[]) {
1529         VMSTATE_UINT64(patb_entry, sPAPRMachineState),
1530         VMSTATE_END_OF_LIST()
1531     },
1532 };
1533 
1534 static const VMStateDescription vmstate_spapr = {
1535     .name = "spapr",
1536     .version_id = 3,
1537     .minimum_version_id = 1,
1538     .post_load = spapr_post_load,
1539     .fields = (VMStateField[]) {
1540         /* used to be @next_irq */
1541         VMSTATE_UNUSED_BUFFER(version_before_3, 0, 4),
1542 
1543         /* RTC offset */
1544         VMSTATE_UINT64_TEST(rtc_offset, sPAPRMachineState, version_before_3),
1545 
1546         VMSTATE_PPC_TIMEBASE_V(tb, sPAPRMachineState, 2),
1547         VMSTATE_END_OF_LIST()
1548     },
1549     .subsections = (const VMStateDescription*[]) {
1550         &vmstate_spapr_ov5_cas,
1551         &vmstate_spapr_patb_entry,
1552         NULL
1553     }
1554 };
1555 
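     /*
      * HPT migration stream format, as produced by the save handlers below:
      *
      *   setup:      be32  htab_shift
      *   iteration:  be32  0 (header), then zero or more chunk records:
      *                 be32  chunkstart (first HPTE index of the chunk)
      *                 be16  n_valid
      *                 be16  n_invalid
      *                 n_valid * HASH_PTE_SIZE_64 bytes of HPTE data
      *               terminated by an all-zero chunk header (0, 0, 0).
      *
      * On load, a non-zero first word of a section is instead interpreted
      * as the htab_shift sent by the setup stage.
      */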
1556 static int htab_save_setup(QEMUFile *f, void *opaque)
1557 {
1558     sPAPRMachineState *spapr = opaque;
1559 
1560     /* "Iteration" header */
1561     qemu_put_be32(f, spapr->htab_shift);
1562 
1563     if (spapr->htab) {
1564         spapr->htab_save_index = 0;
1565         spapr->htab_first_pass = true;
1566     } else {
1567         assert(kvm_enabled());
1568     }
1569 
1571     return 0;
1572 }
1573 
1574 static void htab_save_first_pass(QEMUFile *f, sPAPRMachineState *spapr,
1575                                  int64_t max_ns)
1576 {
1577     bool has_timeout = max_ns != -1;
1578     int htabslots = HTAB_SIZE(spapr) / HASH_PTE_SIZE_64;
1579     int index = spapr->htab_save_index;
1580     int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
1581 
1582     assert(spapr->htab_first_pass);
1583 
1584     do {
1585         int chunkstart;
1586 
1587         /* Consume invalid HPTEs */
1588         while ((index < htabslots)
1589                && !HPTE_VALID(HPTE(spapr->htab, index))) {
1590             CLEAN_HPTE(HPTE(spapr->htab, index));
1591             index++;
1592         }
1593 
1594         /* Consume valid HPTEs */
1595         chunkstart = index;
1596         while ((index < htabslots) && (index - chunkstart < USHRT_MAX)
1597                && HPTE_VALID(HPTE(spapr->htab, index))) {
1598             CLEAN_HPTE(HPTE(spapr->htab, index));
1599             index++;
1600         }
1601 
1602         if (index > chunkstart) {
1603             int n_valid = index - chunkstart;
1604 
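                 /* Chunk record: start index, count of valid HPTEs, and a
                  * zero invalid count (the first pass never reports
                  * invalidated entries) */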
1605             qemu_put_be32(f, chunkstart);
1606             qemu_put_be16(f, n_valid);
1607             qemu_put_be16(f, 0);
1608             qemu_put_buffer(f, HPTE(spapr->htab, chunkstart),
1609                             HASH_PTE_SIZE_64 * n_valid);
1610 
1611             if (has_timeout &&
1612                 (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) > max_ns) {
1613                 break;
1614             }
1615         }
1616     } while ((index < htabslots) && !qemu_file_rate_limit(f));
1617 
1618     if (index >= htabslots) {
1619         assert(index == htabslots);
1620         index = 0;
1621         spapr->htab_first_pass = false;
1622     }
1623     spapr->htab_save_index = index;
1624 }
1625 
1626 static int htab_save_later_pass(QEMUFile *f, sPAPRMachineState *spapr,
1627                                 int64_t max_ns)
1628 {
1629     bool final = max_ns < 0;
1630     int htabslots = HTAB_SIZE(spapr) / HASH_PTE_SIZE_64;
1631     int examined = 0, sent = 0;
1632     int index = spapr->htab_save_index;
1633     int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
1634 
1635     assert(!spapr->htab_first_pass);
1636 
1637     do {
1638         int chunkstart, invalidstart;
1639 
1640         /* Consume non-dirty HPTEs */
1641         while ((index < htabslots)
1642                && !HPTE_DIRTY(HPTE(spapr->htab, index))) {
1643             index++;
1644             examined++;
1645         }
1646 
1647         chunkstart = index;
1648         /* Consume valid dirty HPTEs */
1649         while ((index < htabslots) && (index - chunkstart < USHRT_MAX)
1650                && HPTE_DIRTY(HPTE(spapr->htab, index))
1651                && HPTE_VALID(HPTE(spapr->htab, index))) {
1652             CLEAN_HPTE(HPTE(spapr->htab, index));
1653             index++;
1654             examined++;
1655         }
1656 
1657         invalidstart = index;
1658         /* Consume invalid dirty HPTEs */
1659         while ((index < htabslots) && (index - invalidstart < USHRT_MAX)
1660                && HPTE_DIRTY(HPTE(spapr->htab, index))
1661                && !HPTE_VALID(HPTE(spapr->htab, index))) {
1662             CLEAN_HPTE(HPTE(spapr->htab, index));
1663             index++;
1664             examined++;
1665         }
1666 
1667         if (index > chunkstart) {
1668             int n_valid = invalidstart - chunkstart;
1669             int n_invalid = index - invalidstart;
1670 
1671             qemu_put_be32(f, chunkstart);
1672             qemu_put_be16(f, n_valid);
1673             qemu_put_be16(f, n_invalid);
1674             qemu_put_buffer(f, HPTE(spapr->htab, chunkstart),
1675                             HASH_PTE_SIZE_64 * n_valid);
1676             sent += index - chunkstart;
1677 
1678             if (!final && (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) > max_ns) {
1679                 break;
1680             }
1681         }
1682 
1683         if (examined >= htabslots) {
1684             break;
1685         }
1686 
1687         if (index >= htabslots) {
1688             assert(index == htabslots);
1689             index = 0;
1690         }
1691     } while ((examined < htabslots) && (!qemu_file_rate_limit(f) || final));
1692 
1693     if (index >= htabslots) {
1694         assert(index == htabslots);
1695         index = 0;
1696     }
1697 
1698     spapr->htab_save_index = index;
1699 
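         /* Report completion (1) only once a full scan of the table found
          * nothing dirty left to send */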
1700     return (examined >= htabslots) && (sent == 0) ? 1 : 0;
1701 }
1702 
1703 #define MAX_ITERATION_NS    5000000 /* 5 ms */
1704 #define MAX_KVM_BUF_SIZE    2048
1705 
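     /* Per the save_live_iterate contract: return 0 if there may be more to
      * send, > 0 once the HPT has been completely transmitted, or < 0 on
      * error. */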
1706 static int htab_save_iterate(QEMUFile *f, void *opaque)
1707 {
1708     sPAPRMachineState *spapr = opaque;
1709     int fd;
1710     int rc = 0;
1711 
1712     /* Iteration header */
1713     qemu_put_be32(f, 0);
1714 
1715     if (!spapr->htab) {
1716         assert(kvm_enabled());
1717 
1718         fd = get_htab_fd(spapr);
1719         if (fd < 0) {
1720             return fd;
1721         }
1722 
1723         rc = kvmppc_save_htab(f, fd, MAX_KVM_BUF_SIZE, MAX_ITERATION_NS);
1724         if (rc < 0) {
1725             return rc;
1726         }
1727     } else if (spapr->htab_first_pass) {
1728         htab_save_first_pass(f, spapr, MAX_ITERATION_NS);
1729     } else {
1730         rc = htab_save_later_pass(f, spapr, MAX_ITERATION_NS);
1731     }
1732 
1733     /* End marker */
1734     qemu_put_be32(f, 0);
1735     qemu_put_be16(f, 0);
1736     qemu_put_be16(f, 0);
1737 
1738     return rc;
1739 }
1740 
1741 static int htab_save_complete(QEMUFile *f, void *opaque)
1742 {
1743     sPAPRMachineState *spapr = opaque;
1744     int fd;
1745 
1746     /* Iteration header */
1747     qemu_put_be32(f, 0);
1748 
1749     if (!spapr->htab) {
1750         int rc;
1751 
1752         assert(kvm_enabled());
1753 
1754         fd = get_htab_fd(spapr);
1755         if (fd < 0) {
1756             return fd;
1757         }
1758 
1759         rc = kvmppc_save_htab(f, fd, MAX_KVM_BUF_SIZE, -1);
1760         if (rc < 0) {
1761             return rc;
1762         }
1763     } else {
1764         if (spapr->htab_first_pass) {
1765             htab_save_first_pass(f, spapr, -1);
1766         }
1767         htab_save_later_pass(f, spapr, -1);
1768     }
1769 
1770     /* End marker */
1771     qemu_put_be32(f, 0);
1772     qemu_put_be16(f, 0);
1773     qemu_put_be16(f, 0);
1774 
1775     return 0;
1776 }
1777 
1778 static int htab_load(QEMUFile *f, void *opaque, int version_id)
1779 {
1780     sPAPRMachineState *spapr = opaque;
1781     uint32_t section_hdr;
1782     int fd = -1;
1783 
1784     if (version_id != 1) {
1785         error_report("htab_load() bad version");
1786         return -EINVAL;
1787     }
1788 
1789     section_hdr = qemu_get_be32(f);
1790 
1791     if (section_hdr) {
1792         Error *local_err = NULL;
1793 
1794         /* First section gives the htab size */
1795         spapr_reallocate_hpt(spapr, section_hdr, &local_err);
1796         if (local_err) {
1797             error_report_err(local_err);
1798             return -EINVAL;
1799         }
1800         return 0;
1801     }
1802 
1803     if (!spapr->htab) {
1804         assert(kvm_enabled());
1805 
1806         fd = kvmppc_get_htab_fd(true);
1807         if (fd < 0) {
1808             error_report("Unable to open fd to restore KVM hash table: %s",
1809                          strerror(errno));
                 return fd;
1810         }
1811     }
1812 
1813     while (true) {
1814         uint32_t index;
1815         uint16_t n_valid, n_invalid;
1816 
1817         index = qemu_get_be32(f);
1818         n_valid = qemu_get_be16(f);
1819         n_invalid = qemu_get_be16(f);
1820 
1821         if ((index == 0) && (n_valid == 0) && (n_invalid == 0)) {
1822             /* End of Stream */
1823             break;
1824         }
1825 
1826         if ((index + n_valid + n_invalid) >
1827             (HTAB_SIZE(spapr) / HASH_PTE_SIZE_64)) {
1828             /* Bad index in stream */
1829             error_report(
1830                 "htab_load() bad index %d (%hd+%hd entries) in htab stream (htab_shift=%d)",
1831                 index, n_valid, n_invalid, spapr->htab_shift);
1832             return -EINVAL;
1833         }
1834 
1835         if (spapr->htab) {
1836             if (n_valid) {
1837                 qemu_get_buffer(f, HPTE(spapr->htab, index),
1838                                 HASH_PTE_SIZE_64 * n_valid);
1839             }
1840             if (n_invalid) {
1841                 memset(HPTE(spapr->htab, index + n_valid), 0,
1842                        HASH_PTE_SIZE_64 * n_invalid);
1843             }
1844         } else {
1845             int rc;
1846 
1847             assert(fd >= 0);
1848 
1849             rc = kvmppc_load_htab_chunk(f, fd, index, n_valid, n_invalid);
1850             if (rc < 0) {
1851                 return rc;
1852             }
1853         }
1854     }
1855 
1856     if (!spapr->htab) {
1857         assert(fd >= 0);
1858         close(fd);
1859     }
1860 
1861     return 0;
1862 }
1863 
1864 static void htab_cleanup(void *opaque)
1865 {
1866     sPAPRMachineState *spapr = opaque;
1867 
1868     close_htab_fd(spapr);
1869 }
1870 
1871 static SaveVMHandlers savevm_htab_handlers = {
1872     .save_live_setup = htab_save_setup,
1873     .save_live_iterate = htab_save_iterate,
1874     .save_live_complete_precopy = htab_save_complete,
1875     .cleanup = htab_cleanup,
1876     .load_state = htab_load,
1877 };
1878 
1879 static void spapr_boot_set(void *opaque, const char *boot_device,
1880                            Error **errp)
1881 {
1882     MachineState *machine = MACHINE(qdev_get_machine());
1883     machine->boot_order = g_strdup(boot_device);
1884 }
1885 
1886 /*
1887  * Reset routine for LMB DR devices.
1888  *
1889  * Unlike PCI DR devices, LMB DR devices explicitly register this reset
1890  * routine. Reset for PCI DR devices will be handled by the PHB reset
1891  * routine when it walks all of its child devices. LMB device reset
1892  * occurs as part of spapr_ppc_reset().
1893  */
1894 static void spapr_drc_reset(void *opaque)
1895 {
1896     sPAPRDRConnector *drc = opaque;
1897     DeviceState *d = DEVICE(drc);
1898 
1899     if (d) {
1900         device_reset(d);
1901     }
1902 }
1903 
1904 static void spapr_create_lmb_dr_connectors(sPAPRMachineState *spapr)
1905 {
1906     MachineState *machine = MACHINE(spapr);
1907     uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE;
1908     uint32_t nr_lmbs = (machine->maxram_size - machine->ram_size)/lmb_size;
1909     int i;
1910 
1911     for (i = 0; i < nr_lmbs; i++) {
1912         sPAPRDRConnector *drc;
1913         uint64_t addr;
1914 
1915         addr = i * lmb_size + spapr->hotplug_memory.base;
1916         drc = spapr_dr_connector_new(OBJECT(spapr), SPAPR_DR_CONNECTOR_TYPE_LMB,
1917                                      addr/lmb_size);
1918         qemu_register_reset(spapr_drc_reset, drc);
1919     }
1920 }
1921 
1922 /*
1923  * If RAM size, maxmem size and individual node mem sizes aren't aligned
1924  * to SPAPR_MEMORY_BLOCK_SIZE(256MB), then refuse to start the guest
1925  * since we can't support such unaligned sizes with DRCONF_MEMORY.
1926  */
1927 static void spapr_validate_node_memory(MachineState *machine, Error **errp)
1928 {
1929     int i;
1930 
1931     if (machine->ram_size % SPAPR_MEMORY_BLOCK_SIZE) {
1932         error_setg(errp, "Memory size 0x" RAM_ADDR_FMT
1933                    " is not aligned to %llu MiB",
1934                    machine->ram_size,
1935                    SPAPR_MEMORY_BLOCK_SIZE / M_BYTE);
1936         return;
1937     }
1938 
1939     if (machine->maxram_size % SPAPR_MEMORY_BLOCK_SIZE) {
1940         error_setg(errp, "Maximum memory size 0x" RAM_ADDR_FMT
1941                    " is not aligned to %llu MiB",
1942                    machine->maxram_size,
1943                    SPAPR_MEMORY_BLOCK_SIZE / M_BYTE);
1944         return;
1945     }
1946 
1947     for (i = 0; i < nb_numa_nodes; i++) {
1948         if (numa_info[i].node_mem % SPAPR_MEMORY_BLOCK_SIZE) {
1949             error_setg(errp,
1950                        "Node %d memory size 0x%" PRIx64
1951                        " is not aligned to %llu MiB",
1952                        i, numa_info[i].node_mem,
1953                        SPAPR_MEMORY_BLOCK_SIZE / M_BYTE);
1954             return;
1955         }
1956     }
1957 }
1958 
1959 /* find cpu slot in machine->possible_cpus by core_id */
1960 static CPUArchId *spapr_find_cpu_slot(MachineState *ms, uint32_t id, int *idx)
1961 {
1962     int index = id / smp_threads;
1963 
1964     if (index >= ms->possible_cpus->len) {
1965         return NULL;
1966     }
1967     if (idx) {
1968         *idx = index;
1969     }
1970     return &ms->possible_cpus->cpus[index];
1971 }
1972 
1973 static void spapr_init_cpus(sPAPRMachineState *spapr)
1974 {
1975     MachineState *machine = MACHINE(spapr);
1976     MachineClass *mc = MACHINE_GET_CLASS(machine);
1977     char *type = spapr_get_cpu_core_type(machine->cpu_model);
1978     int smt = kvmppc_smt_threads();
1979     const CPUArchIdList *possible_cpus;
1980     int boot_cores_nr = smp_cpus / smp_threads;
1981     int i;
1982 
1983     if (!type) {
1984         error_report("Unable to find sPAPR CPU Core definition");
1985         exit(1);
1986     }
1987 
1988     possible_cpus = mc->possible_cpu_arch_ids(machine);
1989     if (mc->has_hotpluggable_cpus) {
1990         if (smp_cpus % smp_threads) {
1991             error_report("smp_cpus (%u) must be multiple of threads (%u)",
1992                          smp_cpus, smp_threads);
1993             exit(1);
1994         }
1995         if (max_cpus % smp_threads) {
1996             error_report("max_cpus (%u) must be multiple of threads (%u)",
1997                          max_cpus, smp_threads);
1998             exit(1);
1999         }
2000     } else {
2001         if (max_cpus != smp_cpus) {
2002             error_report("This machine version does not support CPU hotplug");
2003             exit(1);
2004         }
2005         boot_cores_nr = possible_cpus->len;
2006     }
2007 
2008     for (i = 0; i < possible_cpus->len; i++) {
2009         int core_id = i * smp_threads;
2010 
2011         if (mc->has_hotpluggable_cpus) {
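             /* CPU DRC indexes are spaced by the host's SMT stride (smt),
              * while core_id advances by the guest's threads-per-core */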
2012             sPAPRDRConnector *drc =
2013                 spapr_dr_connector_new(OBJECT(spapr),
2014                                        SPAPR_DR_CONNECTOR_TYPE_CPU,
2015                                        (core_id / smp_threads) * smt);
2016 
2017             qemu_register_reset(spapr_drc_reset, drc);
2018         }
2019 
2020         if (i < boot_cores_nr) {
2021             Object *core  = object_new(type);
2022             int nr_threads = smp_threads;
2023 
2024             /* Handle the partially filled core for older machine types */
2025             if ((i + 1) * smp_threads >= smp_cpus) {
2026                 nr_threads = smp_cpus - i * smp_threads;
2027             }
2028 
2029             object_property_set_int(core, nr_threads, "nr-threads",
2030                                     &error_fatal);
2031             object_property_set_int(core, core_id, CPU_CORE_PROP_CORE_ID,
2032                                     &error_fatal);
2033             object_property_set_bool(core, true, "realized", &error_fatal);
2034         }
2035     }
2036     g_free(type);
2037 }
2038 
2039 /* pSeries LPAR / sPAPR hardware init */
2040 static void ppc_spapr_init(MachineState *machine)
2041 {
2042     sPAPRMachineState *spapr = SPAPR_MACHINE(machine);
2043     sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);
2044     const char *kernel_filename = machine->kernel_filename;
2045     const char *initrd_filename = machine->initrd_filename;
2046     PCIHostState *phb;
2047     int i;
2048     MemoryRegion *sysmem = get_system_memory();
2049     MemoryRegion *ram = g_new(MemoryRegion, 1);
2050     MemoryRegion *rma_region;
2051     void *rma = NULL;
2052     hwaddr rma_alloc_size;
2053     hwaddr node0_size = spapr_node0_size();
2054     long load_limit, fw_size;
2055     char *filename;
2056 
2057     msi_nonbroken = true;
2058 
2059     QLIST_INIT(&spapr->phbs);
2060     QTAILQ_INIT(&spapr->pending_dimm_unplugs);
2061 
2062     /* Allocate RMA if necessary */
2063     rma_alloc_size = kvmppc_alloc_rma(&rma);
2064 
2065     if (rma_alloc_size == -1) {
2066         error_report("Unable to create RMA");
2067         exit(1);
2068     }
2069 
2070     if (rma_alloc_size && (rma_alloc_size < node0_size)) {
2071         spapr->rma_size = rma_alloc_size;
2072     } else {
2073         spapr->rma_size = node0_size;
2074 
2075         /* With KVM, we don't actually know whether KVM supports an
2076          * unbounded RMA (PR KVM) or is limited by the hash table size
2077          * (HV KVM using VRMA), so we always assume the latter.
2078          *
2079          * In that case, we also limit the initial allocations for RTAS
2080          * etc... to 256M since we have no way to know what the VRMA size
2081          * is going to be, as it depends on the size of the hash table,
2082          * which isn't determined yet.
2083          */
2084         if (kvm_enabled()) {
2085             spapr->vrma_adjust = 1;
2086             spapr->rma_size = MIN(spapr->rma_size, 0x10000000);
2087         }
2088 
2089         /* Actually we don't support unbounded RMA anymore since we
2090          * added proper emulation of HV mode. The max we can get is
2091          * 16G, which also happens to be what we configure for PAPR
2092          * mode, so make sure we don't do anything bigger than that.
2093          */
2094         spapr->rma_size = MIN(spapr->rma_size, 0x400000000ull);
2095     }
2096 
2097     if (spapr->rma_size > node0_size) {
2098         error_report("Numa node 0 has to span the RMA (%#08"HWADDR_PRIx")",
2099                      spapr->rma_size);
2100         exit(1);
2101     }
2102 
2103     /* Set up a load limit for the ramdisk, leaving room for SLOF and FDT */
2104     load_limit = MIN(spapr->rma_size, RTAS_MAX_ADDR) - FW_OVERHEAD;
2105 
2106     /* Set up Interrupt Controller before we create the VCPUs */
2107     xics_system_init(machine, XICS_IRQS_SPAPR, &error_fatal);
2108 
2109     /* Set up containers for ibm,client-architecture-support negotiated options */
2110     spapr->ov5 = spapr_ovec_new();
2111     spapr->ov5_cas = spapr_ovec_new();
2112 
2113     if (smc->dr_lmb_enabled) {
2114         spapr_ovec_set(spapr->ov5, OV5_DRCONF_MEMORY);
2115         spapr_validate_node_memory(machine, &error_fatal);
2116     }
2117 
2118     spapr_ovec_set(spapr->ov5, OV5_FORM1_AFFINITY);
2119     if (!kvm_enabled() || kvmppc_has_cap_mmu_radix()) {
2120         /* KVM and TCG always allow GTSE with radix... */
2121         spapr_ovec_set(spapr->ov5, OV5_MMU_RADIX_GTSE);
2122     }
2123     /* ... but not with hash (currently). */
2124 
2125     /* advertise support for dedicated HP event source to guests */
2126     if (spapr->use_hotplug_event_source) {
2127         spapr_ovec_set(spapr->ov5, OV5_HP_EVT);
2128     }
2129 
2130     /* init CPUs */
2131     if (machine->cpu_model == NULL) {
2132         machine->cpu_model = kvm_enabled() ? "host" : smc->tcg_default_cpu;
2133     }
2134 
2135     ppc_cpu_parse_features(machine->cpu_model);
2136 
2137     spapr_init_cpus(spapr);
2138 
2139     if (kvm_enabled()) {
2140         /* Enable H_LOGICAL_CI_* so SLOF can talk to in-kernel devices */
2141         kvmppc_enable_logical_ci_hcalls();
2142         kvmppc_enable_set_mode_hcall();
2143 
2144         /* H_CLEAR_MOD/_REF are mandatory in PAPR, but off by default */
2145         kvmppc_enable_clear_ref_mod_hcalls();
2146     }
2147 
2148     /* allocate RAM */
2149     memory_region_allocate_system_memory(ram, NULL, "ppc_spapr.ram",
2150                                          machine->ram_size);
2151     memory_region_add_subregion(sysmem, 0, ram);
2152 
2153     if (rma_alloc_size && rma) {
2154         rma_region = g_new(MemoryRegion, 1);
2155         memory_region_init_ram_ptr(rma_region, NULL, "ppc_spapr.rma",
2156                                    rma_alloc_size, rma);
2157         vmstate_register_ram_global(rma_region);
2158         memory_region_add_subregion(sysmem, 0, rma_region);
2159     }
2160 
2161     /* initialize hotplug memory address space */
2162     if (machine->ram_size < machine->maxram_size) {
2163         ram_addr_t hotplug_mem_size = machine->maxram_size - machine->ram_size;
2164         /*
2165          * Limit the number of hotpluggable memory slots to half the number of
2166          * slots that KVM supports, leaving the other half for PCI and other
2167          * devices. However, ensure that the number of slots doesn't drop below 32.
2168          */
2169         int max_memslots = kvm_enabled() ? kvm_get_max_memslots() / 2 :
2170                            SPAPR_MAX_RAM_SLOTS;
2171 
2172         if (max_memslots < SPAPR_MAX_RAM_SLOTS) {
2173             max_memslots = SPAPR_MAX_RAM_SLOTS;
2174         }
2175         if (machine->ram_slots > max_memslots) {
2176             error_report("Specified number of memory slots %"
2177                          PRIu64" exceeds max supported %d",
2178                          machine->ram_slots, max_memslots);
2179             exit(1);
2180         }
2181 
2182         spapr->hotplug_memory.base = ROUND_UP(machine->ram_size,
2183                                               SPAPR_HOTPLUG_MEM_ALIGN);
2184         memory_region_init(&spapr->hotplug_memory.mr, OBJECT(spapr),
2185                            "hotplug-memory", hotplug_mem_size);
2186         memory_region_add_subregion(sysmem, spapr->hotplug_memory.base,
2187                                     &spapr->hotplug_memory.mr);
2188     }
2189 
2190     if (smc->dr_lmb_enabled) {
2191         spapr_create_lmb_dr_connectors(spapr);
2192     }
2193 
2194     filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, "spapr-rtas.bin");
2195     if (!filename) {
2196         error_report("Could not find LPAR rtas '%s'", "spapr-rtas.bin");
2197         exit(1);
2198     }
2199     spapr->rtas_size = get_image_size(filename);
2200     if (spapr->rtas_size < 0) {
2201         error_report("Could not get size of LPAR rtas '%s'", filename);
2202         exit(1);
2203     }
2204     spapr->rtas_blob = g_malloc(spapr->rtas_size);
2205     if (load_image_size(filename, spapr->rtas_blob, spapr->rtas_size) < 0) {
2206         error_report("Could not load LPAR rtas '%s'", filename);
2207         exit(1);
2208     }
2209     if (spapr->rtas_size > RTAS_MAX_SIZE) {
2210         error_report("RTAS too big ! 0x%zx bytes (max is 0x%x)",
2211                      (size_t)spapr->rtas_size, RTAS_MAX_SIZE);
2212         exit(1);
2213     }
2214     g_free(filename);
2215 
2216     /* Set up RTAS event infrastructure */
2217     spapr_events_init(spapr);
2218 
2219     /* Set up the RTC RTAS interfaces */
2220     spapr_rtc_create(spapr);
2221 
2222     /* Set up VIO bus */
2223     spapr->vio_bus = spapr_vio_bus_init();
2224 
2225     for (i = 0; i < MAX_SERIAL_PORTS; i++) {
2226         if (serial_hds[i]) {
2227             spapr_vty_create(spapr->vio_bus, serial_hds[i]);
2228         }
2229     }
2230 
2231     /* We always have at least the nvram device on VIO */
2232     spapr_create_nvram(spapr);
2233 
2234     /* Set up PCI */
2235     spapr_pci_rtas_init();
2236 
2237     phb = spapr_create_phb(spapr, 0);
2238 
2239     for (i = 0; i < nb_nics; i++) {
2240         NICInfo *nd = &nd_table[i];
2241 
2242         if (!nd->model) {
2243             nd->model = g_strdup("ibmveth");
2244         }
2245 
2246         if (strcmp(nd->model, "ibmveth") == 0) {
2247             spapr_vlan_create(spapr->vio_bus, nd);
2248         } else {
2249             pci_nic_init_nofail(&nd_table[i], phb->bus, nd->model, NULL);
2250         }
2251     }
2252 
2253     for (i = 0; i <= drive_get_max_bus(IF_SCSI); i++) {
2254         spapr_vscsi_create(spapr->vio_bus);
2255     }
2256 
2257     /* Graphics */
2258     if (spapr_vga_init(phb->bus, &error_fatal)) {
2259         spapr->has_graphics = true;
2260         machine->usb |= defaults_enabled() && !machine->usb_disabled;
2261     }
2262 
2263     if (machine->usb) {
2264         if (smc->use_ohci_by_default) {
2265             pci_create_simple(phb->bus, -1, "pci-ohci");
2266         } else {
2267             pci_create_simple(phb->bus, -1, "nec-usb-xhci");
2268         }
2269 
2270         if (spapr->has_graphics) {
2271             USBBus *usb_bus = usb_bus_find(-1);
2272 
2273             usb_create_simple(usb_bus, "usb-kbd");
2274             usb_create_simple(usb_bus, "usb-mouse");
2275         }
2276     }
2277 
2278     if (spapr->rma_size < (MIN_RMA_SLOF << 20)) {
2279         error_report(
2280             "pSeries SLOF firmware requires >= %ldM guest RMA (Real Mode Area memory)",
2281             MIN_RMA_SLOF);
2282         exit(1);
2283     }
2284 
2285     if (kernel_filename) {
2286         uint64_t lowaddr = 0;
2287 
2288         spapr->kernel_size = load_elf(kernel_filename, translate_kernel_address,
2289                                       NULL, NULL, &lowaddr, NULL, 1,
2290                                       PPC_ELF_MACHINE, 0, 0);
2291         if (spapr->kernel_size == ELF_LOAD_WRONG_ENDIAN) {
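             /* The big-endian load failed; retry assuming a little-endian
              * kernel image */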
2292             spapr->kernel_size = load_elf(kernel_filename,
2293                                           translate_kernel_address, NULL, NULL,
2294                                           &lowaddr, NULL, 0, PPC_ELF_MACHINE,
2295                                           0, 0);
2296             spapr->kernel_le = spapr->kernel_size > 0;
2297         }
2298         if (spapr->kernel_size < 0) {
2299             error_report("error loading %s: %s", kernel_filename,
2300                          load_elf_strerror(spapr->kernel_size));
2301             exit(1);
2302         }
2303 
2304         /* load initrd */
2305         if (initrd_filename) {
2306             /* Try to locate the initrd in the gap between the kernel
2307              * and the firmware. Add a bit of space just in case
2308              */
2309             spapr->initrd_base = (KERNEL_LOAD_ADDR + spapr->kernel_size
2310                                   + 0x1ffff) & ~0xffff;
2311             spapr->initrd_size = load_image_targphys(initrd_filename,
2312                                                      spapr->initrd_base,
2313                                                      load_limit
2314                                                      - spapr->initrd_base);
2315             if (spapr->initrd_size < 0) {
2316                 error_report("could not load initial ram disk '%s'",
2317                              initrd_filename);
2318                 exit(1);
2319             }
2320         }
2321     }
2322 
2323     if (bios_name == NULL) {
2324         bios_name = FW_FILE_NAME;
2325     }
2326     filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
2327     if (!filename) {
2328         error_report("Could not find LPAR firmware '%s'", bios_name);
2329         exit(1);
2330     }
2331     fw_size = load_image_targphys(filename, 0, FW_MAX_SIZE);
2332     if (fw_size <= 0) {
2333         error_report("Could not load LPAR firmware '%s'", filename);
2334         exit(1);
2335     }
2336     g_free(filename);
2337 
2338     /* FIXME: Should register things through the MachineState's qdev
2339      * interface; this is a legacy from the sPAPREnvironment structure
2340      * which predated MachineState but had a similar function */
2341     vmstate_register(NULL, 0, &vmstate_spapr, spapr);
2342     register_savevm_live(NULL, "spapr/htab", -1, 1,
2343                          &savevm_htab_handlers, spapr);
2344 
2345     /* used by RTAS */
2346     QTAILQ_INIT(&spapr->ccs_list);
2347     qemu_register_reset(spapr_ccs_reset_hook, spapr);
2348 
2349     qemu_register_boot_set(spapr_boot_set, spapr);
2350 
2351     if (kvm_enabled()) {
2352         /* to stop and start vmclock */
2353         qemu_add_vm_change_state_handler(cpu_ppc_clock_vm_state_change,
2354                                          &spapr->tb);
2355 
2356         kvmppc_spapr_enable_inkernel_multitce();
2357     }
2358 }
2359 
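     /* Map the machine's kvm-type property onto the VM type value passed to
      * KVM_CREATE_VM: 0 selects the default, 1 forces HV KVM, 2 forces PR
      * KVM. */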
2360 static int spapr_kvm_type(const char *vm_type)
2361 {
2362     if (!vm_type) {
2363         return 0;
2364     }
2365 
2366     if (!strcmp(vm_type, "HV")) {
2367         return 1;
2368     }
2369 
2370     if (!strcmp(vm_type, "PR")) {
2371         return 2;
2372     }
2373 
2374     error_report("Unknown kvm-type specified '%s'", vm_type);
2375     exit(1);
2376 }
2377 
2378 /*
2379  * Implementation of an interface to adjust firmware path
2380  * for the bootindex property handling.
2381  */
2382 static char *spapr_get_fw_dev_path(FWPathProvider *p, BusState *bus,
2383                                    DeviceState *dev)
2384 {
2385 #define CAST(type, obj, name) \
2386     ((type *)object_dynamic_cast(OBJECT(obj), (name)))
2387     SCSIDevice *d = CAST(SCSIDevice,  dev, TYPE_SCSI_DEVICE);
2388     sPAPRPHBState *phb = CAST(sPAPRPHBState, dev, TYPE_SPAPR_PCI_HOST_BRIDGE);
2389 
2390     if (d) {
2391         void *spapr = CAST(void, bus->parent, "spapr-vscsi");
2392         VirtIOSCSI *virtio = CAST(VirtIOSCSI, bus->parent, TYPE_VIRTIO_SCSI);
2393         USBDevice *usb = CAST(USBDevice, bus->parent, TYPE_USB_DEVICE);
2394 
2395         if (spapr) {
2396             /*
2397              * Replace "channel@0/disk@0,0" with "disk@8000000000000000":
2398              * We use SRP luns of the form 8000 | (bus << 8) | (id << 5) | lun
2399              * in the top 16 bits of the 64-bit LUN
2400              */
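             /* e.g. bus 0, SCSI id 0, lun 0 yields 0x8000ULL << 48, i.e. the
              * "disk@8000000000000000" path quoted above */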
2401             unsigned id = 0x8000 | (d->id << 8) | d->lun;
2402             return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
2403                                    (uint64_t)id << 48);
2404         } else if (virtio) {
2405             /*
2406              * We use SRP luns of the form 01000000 | (target << 8) | lun
2407              * in the top 32 bits of the 64-bit LUN
2408              * Note: the quote above is from SLOF and it is wrong,
2409              * the actual binding is:
2410              * swap 0100 or 10 << or 20 << ( target lun-id -- srplun )
2411              */
2412             unsigned id = 0x1000000 | (d->id << 16) | d->lun;
2413             return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
2414                                    (uint64_t)id << 32);
2415         } else if (usb) {
2416             /*
2417              * We use SRP luns of the form 01000000 | (usb-port << 16) | lun
2418              * in the top 32 bits of the 64-bit LUN
2419              */
2420             unsigned usb_port = atoi(usb->port->path);
2421             unsigned id = 0x1000000 | (usb_port << 16) | d->lun;
2422             return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
2423                                    (uint64_t)id << 32);
2424         }
2425     }
2426 
2427     /*
2428      * SLOF probes the USB devices, and if it recognizes that the device is a
2429      * storage device, it changes its name to "storage" instead of "usb-host",
2430      * and additionally adds a child node for the SCSI LUN, so the correct
2431  * boot path in SLOF is something like ".../storage@1/disk@xxx" instead.
2432      */
2433     if (strcmp("usb-host", qdev_fw_name(dev)) == 0) {
2434         USBDevice *usbdev = CAST(USBDevice, dev, TYPE_USB_DEVICE);
2435         if (usb_host_dev_is_scsi_storage(usbdev)) {
2436             return g_strdup_printf("storage@%s/disk", usbdev->port->path);
2437         }
2438     }
2439 
2440     if (phb) {
2441         /* Replace "pci" with "pci@800000020000000" */
2442         return g_strdup_printf("pci@%"PRIX64, phb->buid);
2443     }
2444 
2445     return NULL;
2446 }
2447 
2448 static char *spapr_get_kvm_type(Object *obj, Error **errp)
2449 {
2450     sPAPRMachineState *spapr = SPAPR_MACHINE(obj);
2451 
2452     return g_strdup(spapr->kvm_type);
2453 }
2454 
2455 static void spapr_set_kvm_type(Object *obj, const char *value, Error **errp)
2456 {
2457     sPAPRMachineState *spapr = SPAPR_MACHINE(obj);
2458 
2459     g_free(spapr->kvm_type);
2460     spapr->kvm_type = g_strdup(value);
2461 }
2462 
2463 static bool spapr_get_modern_hotplug_events(Object *obj, Error **errp)
2464 {
2465     sPAPRMachineState *spapr = SPAPR_MACHINE(obj);
2466 
2467     return spapr->use_hotplug_event_source;
2468 }
2469 
2470 static void spapr_set_modern_hotplug_events(Object *obj, bool value,
2471                                             Error **errp)
2472 {
2473     sPAPRMachineState *spapr = SPAPR_MACHINE(obj);
2474 
2475     spapr->use_hotplug_event_source = value;
2476 }
2477 
2478 static void spapr_machine_initfn(Object *obj)
2479 {
2480     sPAPRMachineState *spapr = SPAPR_MACHINE(obj);
2481 
2482     spapr->htab_fd = -1;
2483     spapr->use_hotplug_event_source = true;
2484     object_property_add_str(obj, "kvm-type",
2485                             spapr_get_kvm_type, spapr_set_kvm_type, NULL);
2486     object_property_set_description(obj, "kvm-type",
2487                                     "Specifies the KVM virtualization mode (HV, PR)",
2488                                     NULL);
2489     object_property_add_bool(obj, "modern-hotplug-events",
2490                             spapr_get_modern_hotplug_events,
2491                             spapr_set_modern_hotplug_events,
2492                             NULL);
2493     object_property_set_description(obj, "modern-hotplug-events",
2494                                     "Use dedicated hotplug event mechanism in"
2495                                     " place of standard EPOW events when possible"
2496                                     " (required for memory hot-unplug support)",
2497                                     NULL);
2498 }
2499 
2500 static void spapr_machine_finalizefn(Object *obj)
2501 {
2502     sPAPRMachineState *spapr = SPAPR_MACHINE(obj);
2503 
2504     g_free(spapr->kvm_type);
2505 }
2506 
2507 void spapr_do_system_reset_on_cpu(CPUState *cs, run_on_cpu_data arg)
2508 {
2509     cpu_synchronize_state(cs);
2510     ppc_cpu_do_system_reset(cs);
2511 }
2512 
2513 static void spapr_nmi(NMIState *n, int cpu_index, Error **errp)
2514 {
2515     CPUState *cs;
2516 
2517     CPU_FOREACH(cs) {
2518         async_run_on_cpu(cs, spapr_do_system_reset_on_cpu, RUN_ON_CPU_NULL);
2519     }
2520 }
2521 
2522 static void spapr_add_lmbs(DeviceState *dev, uint64_t addr_start, uint64_t size,
2523                            uint32_t node, bool dedicated_hp_event_source,
2524                            Error **errp)
2525 {
2526     sPAPRDRConnector *drc;
2527     sPAPRDRConnectorClass *drck;
2528     uint32_t nr_lmbs = size/SPAPR_MEMORY_BLOCK_SIZE;
2529     int i, fdt_offset, fdt_size;
2530     void *fdt;
2531     uint64_t addr = addr_start;
2532 
2533     for (i = 0; i < nr_lmbs; i++) {
2534         drc = spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_LMB,
2535                 addr/SPAPR_MEMORY_BLOCK_SIZE);
2536         g_assert(drc);
2537 
2538         fdt = create_device_tree(&fdt_size);
2539         fdt_offset = spapr_populate_memory_node(fdt, node, addr,
2540                                                 SPAPR_MEMORY_BLOCK_SIZE);
2541 
2542         drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
2543         drck->attach(drc, dev, fdt, fdt_offset, !dev->hotplugged, errp);
2544         addr += SPAPR_MEMORY_BLOCK_SIZE;
2545         if (!dev->hotplugged) {
2546             /* guests expect coldplugged LMBs to be pre-allocated */
2547             drck->set_allocation_state(drc, SPAPR_DR_ALLOCATION_STATE_USABLE);
2548             drck->set_isolation_state(drc, SPAPR_DR_ISOLATION_STATE_UNISOLATED);
2549         }
2550     }
2551     /* Send a hotplug notification to the guest only in the case of
2552      * hotplugged memory.
2553      */
2554     if (dev->hotplugged) {
2555         if (dedicated_hp_event_source) {
2556             drc = spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_LMB,
2557                     addr_start / SPAPR_MEMORY_BLOCK_SIZE);
2558             drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
2559             spapr_hotplug_req_add_by_count_indexed(SPAPR_DR_CONNECTOR_TYPE_LMB,
2560                                                    nr_lmbs,
2561                                                    drck->get_index(drc));
2562         } else {
2563             spapr_hotplug_req_add_by_count(SPAPR_DR_CONNECTOR_TYPE_LMB,
2564                                            nr_lmbs);
2565         }
2566     }
2567 }
2568 
2569 static void spapr_memory_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
2570                               uint32_t node, Error **errp)
2571 {
2572     Error *local_err = NULL;
2573     sPAPRMachineState *ms = SPAPR_MACHINE(hotplug_dev);
2574     PCDIMMDevice *dimm = PC_DIMM(dev);
2575     PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);
2576     MemoryRegion *mr = ddc->get_memory_region(dimm);
2577     uint64_t align = memory_region_get_alignment(mr);
2578     uint64_t size = memory_region_size(mr);
2579     uint64_t addr;
2580 
2581     pc_dimm_memory_plug(dev, &ms->hotplug_memory, mr, align, &local_err);
2582     if (local_err) {
2583         goto out;
2584     }
2585 
2586     addr = object_property_get_int(OBJECT(dimm), PC_DIMM_ADDR_PROP, &local_err);
2587     if (local_err) {
2588         pc_dimm_memory_unplug(dev, &ms->hotplug_memory, mr);
2589         goto out;
2590     }
2591 
2592     spapr_add_lmbs(dev, addr, size, node,
2593                    spapr_ovec_test(ms->ov5_cas, OV5_HP_EVT),
2594                    &error_abort);
2595 
2596 out:
2597     error_propagate(errp, local_err);
2598 }
2599 
2600 static void spapr_memory_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
2601                                   Error **errp)
2602 {
2603     PCDIMMDevice *dimm = PC_DIMM(dev);
2604     PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);
2605     MemoryRegion *mr = ddc->get_memory_region(dimm);
2606     uint64_t size = memory_region_size(mr);
2607     char *mem_dev;
2608 
2609     if (size % SPAPR_MEMORY_BLOCK_SIZE) {
2610         error_setg(errp, "Hotplugged memory size must be a multiple of "
2611                       "%lld MB", SPAPR_MEMORY_BLOCK_SIZE / M_BYTE);
2612         return;
2613     }
2614 
2615     mem_dev = object_property_get_str(OBJECT(dimm), PC_DIMM_MEMDEV_PROP, NULL);
2616     if (mem_dev && !kvmppc_is_mem_backend_page_size_ok(mem_dev)) {
2617         error_setg(errp, "Memory backend has bad page size. "
2618                    "Use 'memory-backend-file' with correct mem-path.");
2619         return;
2620     }
2621 }
2622 
2623 struct sPAPRDIMMState {
2624     PCDIMMDevice *dimm;
2625     uint32_t nr_lmbs;
2626     QTAILQ_ENTRY(sPAPRDIMMState) next;
2627 };
2628 
2629 static sPAPRDIMMState *spapr_pending_dimm_unplugs_find(sPAPRMachineState *s,
2630                                                        PCDIMMDevice *dimm)
2631 {
2632     sPAPRDIMMState *dimm_state = NULL;
2633 
2634     QTAILQ_FOREACH(dimm_state, &s->pending_dimm_unplugs, next) {
2635         if (dimm_state->dimm == dimm) {
2636             break;
2637         }
2638     }
2639     return dimm_state;
2640 }
2641 
2642 static void spapr_pending_dimm_unplugs_add(sPAPRMachineState *spapr,
2643                                            sPAPRDIMMState *dimm_state)
2644 {
2645     g_assert(!spapr_pending_dimm_unplugs_find(spapr, dimm_state->dimm));
2646     QTAILQ_INSERT_HEAD(&spapr->pending_dimm_unplugs, dimm_state, next);
2647 }
2648 
2649 static void spapr_pending_dimm_unplugs_remove(sPAPRMachineState *spapr,
2650                                               sPAPRDIMMState *dimm_state)
2651 {
2652     QTAILQ_REMOVE(&spapr->pending_dimm_unplugs, dimm_state, next);
2653     g_free(dimm_state);
2654 }
2655 
2656 static sPAPRDIMMState *spapr_recover_pending_dimm_state(sPAPRMachineState *ms,
2657                                                         PCDIMMDevice *dimm)
2658 {
2659     sPAPRDRConnector *drc;
2660     PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);
2661     MemoryRegion *mr = ddc->get_memory_region(dimm);
2662     uint64_t size = memory_region_size(mr);
2663     uint32_t nr_lmbs = size / SPAPR_MEMORY_BLOCK_SIZE;
2664     uint32_t avail_lmbs = 0;
2665     uint64_t addr_start, addr;
2666     int i;
2667     sPAPRDIMMState *ds;
2668 
2669     addr_start = object_property_get_int(OBJECT(dimm), PC_DIMM_ADDR_PROP,
2670                                          &error_abort);
2671 
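     /* Count the LMBs the guest has not released yet: their DRC
      * indicator is still active */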
2672     addr = addr_start;
2673     for (i = 0; i < nr_lmbs; i++) {
2674         drc = spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_LMB,
2675                                        addr / SPAPR_MEMORY_BLOCK_SIZE);
2676         g_assert(drc);
2677         if (drc->indicator_state != SPAPR_DR_INDICATOR_STATE_INACTIVE) {
2678             avail_lmbs++;
2679         }
2680         addr += SPAPR_MEMORY_BLOCK_SIZE;
2681     }
2682 
2683     ds = g_malloc0(sizeof(sPAPRDIMMState));
2684     ds->nr_lmbs = avail_lmbs;
2685     ds->dimm = dimm;
2686     spapr_pending_dimm_unplugs_add(ms, ds);
2687     return ds;
2688 }
2689 
2690 /* Callback to be called during DRC release. */
2691 void spapr_lmb_release(DeviceState *dev)
2692 {
2693     HotplugHandler *hotplug_ctrl = qdev_get_hotplug_handler(dev);
2694     sPAPRMachineState *spapr = SPAPR_MACHINE(hotplug_ctrl);
2695     sPAPRDIMMState *ds = spapr_pending_dimm_unplugs_find(spapr, PC_DIMM(dev));
2696 
2697     /* This information will get lost if a migration occurs
2698      * during the unplug process. In this case, recover it. */
2699     if (ds == NULL) {
2700         ds = spapr_recover_pending_dimm_state(spapr, PC_DIMM(dev));
2701         if (ds->nr_lmbs) {
2702             return;
2703         }
2704     } else if (--ds->nr_lmbs) {
2705         return;
2706     }
2707 
2708     spapr_pending_dimm_unplugs_remove(spapr, ds);
2709 
2710     /*
2711      * Now that all the LMBs have been removed by the guest, call the
2712      * pc-dimm unplug handler to clean up the pc-dimm device.
2713      */
2714     hotplug_handler_unplug(hotplug_ctrl, dev, &error_abort);
2715 }
2716 
2717 static void spapr_memory_unplug(HotplugHandler *hotplug_dev, DeviceState *dev,
2718                                 Error **errp)
2719 {
2720     sPAPRMachineState *ms = SPAPR_MACHINE(hotplug_dev);
2721     PCDIMMDevice *dimm = PC_DIMM(dev);
2722     PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);
2723     MemoryRegion *mr = ddc->get_memory_region(dimm);
2724 
2725     pc_dimm_memory_unplug(dev, &ms->hotplug_memory, mr);
2726     object_unparent(OBJECT(dev));
2727 }
2728 
2729 static void spapr_memory_unplug_request(HotplugHandler *hotplug_dev,
2730                                         DeviceState *dev, Error **errp)
2731 {
2732     sPAPRMachineState *spapr = SPAPR_MACHINE(hotplug_dev);
2733     Error *local_err = NULL;
2734     PCDIMMDevice *dimm = PC_DIMM(dev);
2735     PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);
2736     MemoryRegion *mr = ddc->get_memory_region(dimm);
2737     uint64_t size = memory_region_size(mr);
2738     uint32_t nr_lmbs = size / SPAPR_MEMORY_BLOCK_SIZE;
2739     uint64_t addr_start, addr;
2740     int i;
2741     sPAPRDRConnector *drc;
2742     sPAPRDRConnectorClass *drck;
2743     sPAPRDIMMState *ds;
2744 
2745     addr_start = object_property_get_int(OBJECT(dimm), PC_DIMM_ADDR_PROP,
2746                                          &local_err);
2747     if (local_err) {
2748         goto out;
2749     }
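         /* Record the pending unplug so spapr_lmb_release() can tell when
          * the guest has returned every LMB of this DIMM */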
2750 
2751     ds = g_malloc0(sizeof(sPAPRDIMMState));
2752     ds->nr_lmbs = nr_lmbs;
2753     ds->dimm = dimm;
2754     spapr_pending_dimm_unplugs_add(spapr, ds);
2755 
2756     addr = addr_start;
2757     for (i = 0; i < nr_lmbs; i++) {
2758         drc = spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_LMB,
2759                 addr / SPAPR_MEMORY_BLOCK_SIZE);
2760         g_assert(drc);
2761 
2762         drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
2763         drck->detach(drc, dev, errp);
2764         addr += SPAPR_MEMORY_BLOCK_SIZE;
2765     }
2766 
2767     drc = spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_LMB,
2768                                    addr_start / SPAPR_MEMORY_BLOCK_SIZE);
2769     drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
2770     spapr_hotplug_req_remove_by_count_indexed(SPAPR_DR_CONNECTOR_TYPE_LMB,
2771                                               nr_lmbs,
2772                                               drck->get_index(drc));
2773 out:
2774     error_propagate(errp, local_err);
2775 }
2776 
2777 void *spapr_populate_hotplug_cpu_dt(CPUState *cs, int *fdt_offset,
2778                                     sPAPRMachineState *spapr)
2779 {
2780     PowerPCCPU *cpu = POWERPC_CPU(cs);
2781     DeviceClass *dc = DEVICE_GET_CLASS(cs);
2782     int id = ppc_get_vcpu_dt_id(cpu);
2783     void *fdt;
2784     int offset, fdt_size;
2785     char *nodename;
2786 
2787     fdt = create_device_tree(&fdt_size);
2788     nodename = g_strdup_printf("%s@%x", dc->fw_name, id);
2789     offset = fdt_add_subnode(fdt, 0, nodename);
2790 
2791     spapr_populate_cpu_dt(cs, fdt, offset, spapr);
2792     g_free(nodename);
2793 
2794     *fdt_offset = offset;
2795     return fdt;
2796 }
2797 
2798 static void spapr_core_unplug(HotplugHandler *hotplug_dev, DeviceState *dev,
2799                               Error **errp)
2800 {
2801     MachineState *ms = MACHINE(qdev_get_machine());
2802     CPUCore *cc = CPU_CORE(dev);
2803     CPUArchId *core_slot = spapr_find_cpu_slot(ms, cc->core_id, NULL);
2804 
2805     assert(core_slot);
2806     core_slot->cpu = NULL;
2807     object_unparent(OBJECT(dev));
2808 }
2809 
2810 /* Callback to be called during DRC release. */
2811 void spapr_core_release(DeviceState *dev)
2812 {
2813     HotplugHandler *hotplug_ctrl;
2814 
2815     hotplug_ctrl = qdev_get_hotplug_handler(dev);
2816     hotplug_handler_unplug(hotplug_ctrl, dev, &error_abort);
2817 }
2818 
2819 static
2820 void spapr_core_unplug_request(HotplugHandler *hotplug_dev, DeviceState *dev,
2821                                Error **errp)
2822 {
2823     int index;
2824     sPAPRDRConnector *drc;
2825     sPAPRDRConnectorClass *drck;
2826     Error *local_err = NULL;
2827     CPUCore *cc = CPU_CORE(dev);
2828     int smt = kvmppc_smt_threads();
2829 
2830     if (!spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index)) {
2831         error_setg(errp, "Unable to find CPU core with core-id: %d",
2832                    cc->core_id);
2833         return;
2834     }
2835     if (index == 0) {
2836         error_setg(errp, "Boot CPU core may not be unplugged");
2837         return;
2838     }
2839 
2840     drc = spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_CPU, index * smt);
2841     g_assert(drc);
2842 
2843     drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
2844     drck->detach(drc, dev, &local_err);
2845     if (local_err) {
2846         error_propagate(errp, local_err);
2847         return;
2848     }
2849 
2850     spapr_hotplug_req_remove_by_index(drc);
2851 }
2852 
2853 static void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
2854                             Error **errp)
2855 {
2856     sPAPRMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
2857     MachineClass *mc = MACHINE_GET_CLASS(spapr);
2858     sPAPRCPUCore *core = SPAPR_CPU_CORE(OBJECT(dev));
2859     CPUCore *cc = CPU_CORE(dev);
2860     CPUState *cs = CPU(core->threads);
2861     sPAPRDRConnector *drc;
2862     Error *local_err = NULL;
2863     void *fdt = NULL;
2864     int fdt_offset = 0;
2865     int smt = kvmppc_smt_threads();
2866     CPUArchId *core_slot;
2867     int index;
2868 
2869     core_slot = spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index);
2870     if (!core_slot) {
2871         error_setg(errp, "Unable to find CPU core with core-id: %d",
2872                    cc->core_id);
2873         return;
2874     }
2875     drc = spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_CPU, index * smt);
2876 
2877     g_assert(drc || !mc->has_hotpluggable_cpus);
2878 
2879     /*
2880      * Set up CPU DT entries only for hotplugged CPUs. For boot-time or
2881      * coldplugged CPUs, DT entries are set up in spapr_build_fdt().
2882      */
2883     if (dev->hotplugged) {
2884         fdt = spapr_populate_hotplug_cpu_dt(cs, &fdt_offset, spapr);
2885     }
2886 
2887     if (drc) {
2888         sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
2889         drck->attach(drc, dev, fdt, fdt_offset, !dev->hotplugged, &local_err);
2890         if (local_err) {
2891             g_free(fdt);
2892             error_propagate(errp, local_err);
2893             return;
2894         }
2895     }
2896 
2897     if (dev->hotplugged) {
2898         /*
2899          * Send a hotplug notification interrupt to the guest only in
2900          * the case of hotplugged CPUs.
2901          */
2902         spapr_hotplug_req_add_by_index(drc);
2903     } else {
2904         /*
2905          * Set the right DRC states for a cold-plugged CPU.
2906          */
2907         if (drc) {
2908             sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
2909             drck->set_allocation_state(drc, SPAPR_DR_ALLOCATION_STATE_USABLE);
2910             drck->set_isolation_state(drc, SPAPR_DR_ISOLATION_STATE_UNISOLATED);
2911         }
2912     }
2913     core_slot->cpu = OBJECT(dev);
2914 }
2915 
2916 static void spapr_core_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
2917                                 Error **errp)
2918 {
2919     MachineState *machine = MACHINE(OBJECT(hotplug_dev));
2920     MachineClass *mc = MACHINE_GET_CLASS(hotplug_dev);
2921     Error *local_err = NULL;
2922     CPUCore *cc = CPU_CORE(dev);
2923     char *base_core_type = spapr_get_cpu_core_type(machine->cpu_model);
2924     const char *type = object_get_typename(OBJECT(dev));
2925     CPUArchId *core_slot;
2926     int index;
2927 
2928     if (dev->hotplugged && !mc->has_hotpluggable_cpus) {
2929         error_setg(&local_err, "CPU hotplug not supported for this machine");
2930         goto out;
2931     }
2932 
2933     if (strcmp(base_core_type, type)) {
2934         error_setg(&local_err, "CPU core type should be %s", base_core_type);
2935         goto out;
2936     }
2937 
2938     if (cc->core_id % smp_threads) {
2939         error_setg(&local_err, "invalid core id %d", cc->core_id);
2940         goto out;
2941     }
2942 
2943     /*
2944      * In general we should have homogeneous threads-per-core, but old
2945      * (pre hotplug support) machine types allow the last core to have
2946      * reduced threads as a compatibility hack for when we allowed a
2947      * total vcpu count that is not a multiple of threads-per-core.
2948      */
2949     if (mc->has_hotpluggable_cpus && (cc->nr_threads != smp_threads)) {
2950         error_setg(errp, "invalid nr-threads %d, must be %d",
2951                    cc->nr_threads, smp_threads);
2952         return;
2953     }
2954 
2955     core_slot = spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index);
2956     if (!core_slot) {
2957         error_setg(&local_err, "core id %d out of range", cc->core_id);
2958         goto out;
2959     }
2960 
2961     if (core_slot->cpu) {
2962         error_setg(&local_err, "core %d already populated", cc->core_id);
2963         goto out;
2964     }
2965 
2966     numa_cpu_pre_plug(core_slot, dev, &local_err);
2967 
2968 out:
2969     g_free(base_core_type);
2970     error_propagate(errp, local_err);
2971 }
2972 
2973 static void spapr_machine_device_plug(HotplugHandler *hotplug_dev,
2974                                       DeviceState *dev, Error **errp)
2975 {
2976     sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(qdev_get_machine());
2977 
2978     if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
2979         int node;
2980 
2981         if (!smc->dr_lmb_enabled) {
2982             error_setg(errp, "Memory hotplug not supported for this machine");
2983             return;
2984         }
2985         node = object_property_get_int(OBJECT(dev), PC_DIMM_NODE_PROP, errp);
2986         if (*errp) {
2987             return;
2988         }
2989         if (node < 0 || node >= MAX_NODES) {
2990             error_setg(errp, "Invaild node %d", node);
2991             return;
2992         }
2993 
2994         /*
2995          * Currently the PowerPC kernel doesn't allow hot-adding memory to
2996          * a memory-less node, but instead will silently add the memory
2997          * to the first node that has some memory. This causes two
2998          * unexpected behaviours for the user.
2999          *
3000          * - Memory gets hotplugged to a different node than what the user
3001          *   specified.
3002          * - Since the pc-dimm subsystem in QEMU still thinks that memory belongs
3003          *   to the memory-less node, a reboot will set things accordingly
3004          *   and the previously hotplugged memory then ends up in the right node.
3005          *   This appears as if some memory moved from one node to another.
3006          *
3007          * So until the kernel starts supporting memory hotplug to memory-less
3008          * nodes, just prevent such attempts upfront in QEMU.
3009          */
3010         if (nb_numa_nodes && !numa_info[node].node_mem) {
3011             error_setg(errp, "Can't hotplug memory to memory-less node %d",
3012                        node);
3013             return;
3014         }
3015 
3016         spapr_memory_plug(hotplug_dev, dev, node, errp);
3017     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
3018         spapr_core_plug(hotplug_dev, dev, errp);
3019     }
3020 }
3021 
3022 static void spapr_machine_device_unplug(HotplugHandler *hotplug_dev,
3023                                       DeviceState *dev, Error **errp)
3024 {
3025     sPAPRMachineState *sms = SPAPR_MACHINE(qdev_get_machine());
3026     MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());
3027 
3028     if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
3029         if (spapr_ovec_test(sms->ov5_cas, OV5_HP_EVT)) {
3030             spapr_memory_unplug(hotplug_dev, dev, errp);
3031         } else {
3032             error_setg(errp, "Memory hot unplug not supported for this guest");
3033         }
3034     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
3035         if (!mc->has_hotpluggable_cpus) {
3036             error_setg(errp, "CPU hot unplug not supported on this machine");
3037             return;
3038         }
3039         spapr_core_unplug(hotplug_dev, dev, errp);
3040     }
3041 }
3042 
3043 static void spapr_machine_device_unplug_request(HotplugHandler *hotplug_dev,
3044                                                 DeviceState *dev, Error **errp)
3045 {
3046     sPAPRMachineState *sms = SPAPR_MACHINE(qdev_get_machine());
3047     MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());
3048 
3049     if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
3050         if (spapr_ovec_test(sms->ov5_cas, OV5_HP_EVT)) {
3051             spapr_memory_unplug_request(hotplug_dev, dev, errp);
3052         } else {
3053             /* NOTE: this means there is a window after guest reset, prior
3054              * to CAS negotiation, where unplug requests will fail because
3055              * the capability has not been detected yet. This is a bit
3056              * different from the case of PCI unplug, where the events are
3057              * queued and eventually handled by the guest after boot.
3058              */
3059             error_setg(errp, "Memory hot unplug not supported for this guest");
3060         }
3061     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
3062         if (!mc->has_hotpluggable_cpus) {
3063             error_setg(errp, "CPU hot unplug not supported on this machine");
3064             return;
3065         }
3066         spapr_core_unplug_request(hotplug_dev, dev, errp);
3067     }
3068 }
3069 
3070 static void spapr_machine_device_pre_plug(HotplugHandler *hotplug_dev,
3071                                           DeviceState *dev, Error **errp)
3072 {
3073     if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
3074         spapr_memory_pre_plug(hotplug_dev, dev, errp);
3075     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
3076         spapr_core_pre_plug(hotplug_dev, dev, errp);
3077     }
3078 }
3079 
3080 static HotplugHandler *spapr_get_hotplug_handler(MachineState *machine,
3081                                                  DeviceState *dev)
3082 {
3083     if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM) ||
3084         object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
3085         return HOTPLUG_HANDLER(machine);
3086     }
3087     return NULL;
3088 }
3089 
3090 static CpuInstanceProperties
3091 spapr_cpu_index_to_props(MachineState *machine, unsigned cpu_index)
3092 {
3093     CPUArchId *core_slot;
3094     MachineClass *mc = MACHINE_GET_CLASS(machine);
3095 
3096     /* make sure possible_cpus is initialized */
3097     mc->possible_cpu_arch_ids(machine);
3098     /* get CPU core slot containing thread that matches cpu_index */
3099     core_slot = spapr_find_cpu_slot(machine, cpu_index, NULL);
3100     assert(core_slot);
3101     return core_slot->props;
3102 }
3103 
3104 static const CPUArchIdList *spapr_possible_cpu_arch_ids(MachineState *machine)
3105 {
3106     int i;
3107     int spapr_max_cores = max_cpus / smp_threads;
3108     MachineClass *mc = MACHINE_GET_CLASS(machine);
3109 
3110     if (!mc->has_hotpluggable_cpus) {
3111         spapr_max_cores = QEMU_ALIGN_UP(smp_cpus, smp_threads) / smp_threads;
3112     }
3113     if (machine->possible_cpus) {
3114         assert(machine->possible_cpus->len == spapr_max_cores);
3115         return machine->possible_cpus;
3116     }
3117 
3118     machine->possible_cpus = g_malloc0(sizeof(CPUArchIdList) +
3119                              sizeof(CPUArchId) * spapr_max_cores);
3120     machine->possible_cpus->len = spapr_max_cores;
3121     for (i = 0; i < machine->possible_cpus->len; i++) {
3122         int core_id = i * smp_threads;
3123 
3124         machine->possible_cpus->cpus[i].vcpus_count = smp_threads;
3125         machine->possible_cpus->cpus[i].arch_id = core_id;
3126         machine->possible_cpus->cpus[i].props.has_core_id = true;
3127         machine->possible_cpus->cpus[i].props.core_id = core_id;
3128 
3129         /* default distribution of CPUs over NUMA nodes */
3130         if (nb_numa_nodes) {
3131             /* Preset the values but do not enable them yet (i.e. keep
3132              * 'has_node_id = false'); the NUMA init code enables them
3133              * later if no manual mapping was given on the CLI. */
3134             machine->possible_cpus->cpus[i].props.node_id =
3135                 core_id / smp_threads / smp_cores % nb_numa_nodes;
3136         }
3137     }
3138     return machine->possible_cpus;
3139 }
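
/*
 * Editor's note (illustration only): a worked example of the default
 * node assignment computed above.  Assuming smp_threads = 8,
 * smp_cores = 2 and nb_numa_nodes = 2, slot i has core_id = i * 8, so
 *
 *   node_id = (i * 8) / 8 / 2 % 2 = (i / 2) % 2
 *
 * i.e. cores 0-1 land on node 0, cores 2-3 on node 1, cores 4-5 back
 * on node 0, and so on: groups of smp_cores cores are round-robined
 * across the NUMA nodes.
 */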
3140 
3141 static void spapr_phb_placement(sPAPRMachineState *spapr, uint32_t index,
3142                                 uint64_t *buid, hwaddr *pio,
3143                                 hwaddr *mmio32, hwaddr *mmio64,
3144                                 unsigned n_dma, uint32_t *liobns, Error **errp)
3145 {
3146     /*
3147      * New-style PHB window placement.
3148      *
3149      * Goal: give each PHB a large (1TiB), naturally aligned 64-bit
3150      * MMIO window, in addition to 2GiB 32-bit MMIO and 64kiB PIO
3151      * windows.
3152      *
3153      * Some guest kernels can't work with MMIO windows above 1<<46
3154      * (64TiB), so we place up to 31 PHBs in the area 32TiB..64TiB
3155      *
3156      * 32TiB..(32TiB+1984kiB) contains the 64kiB PIO windows for each
3157      * PHB stacked together.  (32TiB+2GiB)..(32TiB+64GiB) contains the
3158      * 2GiB 32-bit MMIO windows for each PHB.  Then 33..64TiB has the
3159      * 1TiB 64-bit MMIO windows for each PHB.
3160      */
3161     const uint64_t base_buid = 0x800000020000000ULL;
3162 #define SPAPR_MAX_PHBS ((SPAPR_PCI_LIMIT - SPAPR_PCI_BASE) / \
3163                         SPAPR_PCI_MEM64_WIN_SIZE - 1)
3164     int i;
3165 
3166     /* Sanity check natural alignments */
3167     QEMU_BUILD_BUG_ON((SPAPR_PCI_BASE % SPAPR_PCI_MEM64_WIN_SIZE) != 0);
3168     QEMU_BUILD_BUG_ON((SPAPR_PCI_LIMIT % SPAPR_PCI_MEM64_WIN_SIZE) != 0);
3169     QEMU_BUILD_BUG_ON((SPAPR_PCI_MEM64_WIN_SIZE % SPAPR_PCI_MEM32_WIN_SIZE) != 0);
3170     QEMU_BUILD_BUG_ON((SPAPR_PCI_MEM32_WIN_SIZE % SPAPR_PCI_IO_WIN_SIZE) != 0);
3171     /* Sanity check bounds */
3172     QEMU_BUILD_BUG_ON((SPAPR_MAX_PHBS * SPAPR_PCI_IO_WIN_SIZE) >
3173                       SPAPR_PCI_MEM32_WIN_SIZE);
3174     QEMU_BUILD_BUG_ON((SPAPR_MAX_PHBS * SPAPR_PCI_MEM32_WIN_SIZE) >
3175                       SPAPR_PCI_MEM64_WIN_SIZE);
3176 
3177     if (index >= SPAPR_MAX_PHBS) {
3178         error_setg(errp, "\"index\" for PAPR PHB is too large (max %llu)",
3179                    SPAPR_MAX_PHBS - 1);
3180         return;
3181     }
3182 
3183     *buid = base_buid + index;
3184     for (i = 0; i < n_dma; ++i) {
3185         liobns[i] = SPAPR_PCI_LIOBN(index, i);
3186     }
3187 
3188     *pio = SPAPR_PCI_BASE + index * SPAPR_PCI_IO_WIN_SIZE;
3189     *mmio32 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM32_WIN_SIZE;
3190     *mmio64 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM64_WIN_SIZE;
3191 }
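
/*
 * Editor's note (illustration only): a worked example of the window
 * placement above, assuming the values implied by the layout comment
 * (SPAPR_PCI_BASE = 32TiB, SPAPR_PCI_MEM64_WIN_SIZE = 1TiB,
 * SPAPR_PCI_MEM32_WIN_SIZE = 2GiB, SPAPR_PCI_IO_WIN_SIZE = 64kiB):
 *
 *   index 0: *pio = 32TiB, *mmio32 = 32TiB + 2GiB, *mmio64 = 33TiB
 *   index 1: *pio = 32TiB + 64kiB, *mmio32 = 32TiB + 4GiB,
 *            *mmio64 = 34TiB
 *
 * The PIO windows stack at the bottom of the area, the 32-bit windows
 * follow from 32TiB + 2GiB, and each 64-bit window gets its own
 * naturally aligned 1TiB slot from 33TiB up, matching the comment.
 */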
3192 
3193 static ICSState *spapr_ics_get(XICSFabric *dev, int irq)
3194 {
3195     sPAPRMachineState *spapr = SPAPR_MACHINE(dev);
3196 
3197     return ics_valid_irq(spapr->ics, irq) ? spapr->ics : NULL;
3198 }
3199 
3200 static void spapr_ics_resend(XICSFabric *dev)
3201 {
3202     sPAPRMachineState *spapr = SPAPR_MACHINE(dev);
3203 
3204     ics_resend(spapr->ics);
3205 }
3206 
3207 static ICPState *spapr_icp_get(XICSFabric *xi, int cpu_dt_id)
3208 {
3209     PowerPCCPU *cpu = ppc_get_vcpu_by_dt_id(cpu_dt_id);
3210 
3211     return cpu ? ICP(cpu->intc) : NULL;
3212 }
3213 
3214 static void spapr_pic_print_info(InterruptStatsProvider *obj,
3215                                  Monitor *mon)
3216 {
3217     sPAPRMachineState *spapr = SPAPR_MACHINE(obj);
3218     CPUState *cs;
3219 
3220     CPU_FOREACH(cs) {
3221         PowerPCCPU *cpu = POWERPC_CPU(cs);
3222 
3223         icp_pic_print_info(ICP(cpu->intc), mon);
3224     }
3225 
3226     ics_pic_print_info(spapr->ics, mon);
3227 }
3228 
3229 static void spapr_machine_class_init(ObjectClass *oc, void *data)
3230 {
3231     MachineClass *mc = MACHINE_CLASS(oc);
3232     sPAPRMachineClass *smc = SPAPR_MACHINE_CLASS(oc);
3233     FWPathProviderClass *fwc = FW_PATH_PROVIDER_CLASS(oc);
3234     NMIClass *nc = NMI_CLASS(oc);
3235     HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc);
3236     PPCVirtualHypervisorClass *vhc = PPC_VIRTUAL_HYPERVISOR_CLASS(oc);
3237     XICSFabricClass *xic = XICS_FABRIC_CLASS(oc);
3238     InterruptStatsProviderClass *ispc = INTERRUPT_STATS_PROVIDER_CLASS(oc);
3239 
3240     mc->desc = "pSeries Logical Partition (PAPR compliant)";
3241 
3242     /*
3243      * We set up the default / latest behaviour here.  The class_init
3244      * functions for the specific versioned machine types can override
3245      * these details for backwards compatibility
3246      */
3247     mc->init = ppc_spapr_init;
3248     mc->reset = ppc_spapr_reset;
3249     mc->block_default_type = IF_SCSI;
3250     mc->max_cpus = 1024;
3251     mc->no_parallel = 1;
3252     mc->default_boot_order = "";
3253     mc->default_ram_size = 512 * M_BYTE;
3254     mc->kvm_type = spapr_kvm_type;
3255     mc->has_dynamic_sysbus = true;
3256     mc->pci_allow_0_address = true;
3257     mc->get_hotplug_handler = spapr_get_hotplug_handler;
3258     hc->pre_plug = spapr_machine_device_pre_plug;
3259     hc->plug = spapr_machine_device_plug;
3260     hc->unplug = spapr_machine_device_unplug;
3261     mc->cpu_index_to_instance_props = spapr_cpu_index_to_props;
3262     mc->possible_cpu_arch_ids = spapr_possible_cpu_arch_ids;
3263     hc->unplug_request = spapr_machine_device_unplug_request;
3264 
3265     smc->dr_lmb_enabled = true;
3266     smc->tcg_default_cpu = "POWER8";
3267     mc->has_hotpluggable_cpus = true;
3268     fwc->get_dev_path = spapr_get_fw_dev_path;
3269     nc->nmi_monitor_handler = spapr_nmi;
3270     smc->phb_placement = spapr_phb_placement;
3271     vhc->hypercall = emulate_spapr_hypercall;
3272     vhc->hpt_mask = spapr_hpt_mask;
3273     vhc->map_hptes = spapr_map_hptes;
3274     vhc->unmap_hptes = spapr_unmap_hptes;
3275     vhc->store_hpte = spapr_store_hpte;
3276     vhc->get_patbe = spapr_get_patbe;
3277     xic->ics_get = spapr_ics_get;
3278     xic->ics_resend = spapr_ics_resend;
3279     xic->icp_get = spapr_icp_get;
3280     ispc->print_info = spapr_pic_print_info;
3281     /* Force NUMA node memory size to be a multiple of
3282      * SPAPR_MEMORY_BLOCK_SIZE (256M) since that's the granularity
3283      * at which LMBs are represented and hot-added.
3284      */
3285     mc->numa_mem_align_shift = 28;
3286 }
3287 
3288 static const TypeInfo spapr_machine_info = {
3289     .name          = TYPE_SPAPR_MACHINE,
3290     .parent        = TYPE_MACHINE,
3291     .abstract      = true,
3292     .instance_size = sizeof(sPAPRMachineState),
3293     .instance_init = spapr_machine_initfn,
3294     .instance_finalize = spapr_machine_finalizefn,
3295     .class_size    = sizeof(sPAPRMachineClass),
3296     .class_init    = spapr_machine_class_init,
3297     .interfaces = (InterfaceInfo[]) {
3298         { TYPE_FW_PATH_PROVIDER },
3299         { TYPE_NMI },
3300         { TYPE_HOTPLUG_HANDLER },
3301         { TYPE_PPC_VIRTUAL_HYPERVISOR },
3302         { TYPE_XICS_FABRIC },
3303         { TYPE_INTERRUPT_STATS_PROVIDER },
3304         { }
3305     },
3306 };
3307 
3308 #define DEFINE_SPAPR_MACHINE(suffix, verstr, latest)                 \
3309     static void spapr_machine_##suffix##_class_init(ObjectClass *oc, \
3310                                                     void *data)      \
3311     {                                                                \
3312         MachineClass *mc = MACHINE_CLASS(oc);                        \
3313         spapr_machine_##suffix##_class_options(mc);                  \
3314         if (latest) {                                                \
3315             mc->alias = "pseries";                                   \
3316             mc->is_default = 1;                                      \
3317         }                                                            \
3318     }                                                                \
3319     static void spapr_machine_##suffix##_instance_init(Object *obj)  \
3320     {                                                                \
3321         MachineState *machine = MACHINE(obj);                        \
3322         spapr_machine_##suffix##_instance_options(machine);          \
3323     }                                                                \
3324     static const TypeInfo spapr_machine_##suffix##_info = {          \
3325         .name = MACHINE_TYPE_NAME("pseries-" verstr),                \
3326         .parent = TYPE_SPAPR_MACHINE,                                \
3327         .class_init = spapr_machine_##suffix##_class_init,           \
3328         .instance_init = spapr_machine_##suffix##_instance_init,     \
3329     };                                                               \
3330     static void spapr_machine_register_##suffix(void)                \
3331     {                                                                \
3332         type_register(&spapr_machine_##suffix##_info);               \
3333     }                                                                \
3334     type_init(spapr_machine_register_##suffix)
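
/*
 * Editor's note (illustration only): a sketch of the expansion for the
 * latest machine type.  DEFINE_SPAPR_MACHINE(2_10, "2.10", true)
 * generates spapr_machine_2_10_class_init() (which applies
 * spapr_machine_2_10_class_options() and, because latest is true, sets
 * mc->alias = "pseries" and mc->is_default = 1), an instance_init
 * wrapper, a TypeInfo registering the QOM type "pseries-2.10-machine"
 * (via MACHINE_TYPE_NAME), and a type_init() hook that registers the
 * type at startup.
 */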
3335 
3336 /*
3337  * pseries-2.10
3338  */
3339 static void spapr_machine_2_10_instance_options(MachineState *machine)
3340 {
3341 }
3342 
3343 static void spapr_machine_2_10_class_options(MachineClass *mc)
3344 {
3345     /* Defaults for the latest behaviour inherited from the base class */
3346 }
3347 
3348 DEFINE_SPAPR_MACHINE(2_10, "2.10", true);
3349 
3350 /*
3351  * pseries-2.9
3352  */
3353 #define SPAPR_COMPAT_2_9                                               \
3354     HW_COMPAT_2_9
3355 
3356 static void spapr_machine_2_9_instance_options(MachineState *machine)
3357 {
3358     spapr_machine_2_10_instance_options(machine);
3359 }
3360 
3361 static void spapr_machine_2_9_class_options(MachineClass *mc)
3362 {
3363     spapr_machine_2_10_class_options(mc);
3364     SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_9);
3365     mc->numa_auto_assign_ram = numa_legacy_auto_assign_ram;
3366 }
3367 
3368 DEFINE_SPAPR_MACHINE(2_9, "2.9", false);
3369 
3370 /*
3371  * pseries-2.8
3372  */
3373 #define SPAPR_COMPAT_2_8                                        \
3374     HW_COMPAT_2_8                                               \
3375     {                                                           \
3376         .driver   = TYPE_SPAPR_PCI_HOST_BRIDGE,                 \
3377         .property = "pcie-extended-configuration-space",        \
3378         .value    = "off",                                      \
3379     },
3380 
3381 static void spapr_machine_2_8_instance_options(MachineState *machine)
3382 {
3383     spapr_machine_2_9_instance_options(machine);
3384 }
3385 
3386 static void spapr_machine_2_8_class_options(MachineClass *mc)
3387 {
3388     spapr_machine_2_9_class_options(mc);
3389     SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_8);
3390     mc->numa_mem_align_shift = 23; /* 1 << 23 == 8 MiB, pre-2.9 value */
3391 }
3392 
3393 DEFINE_SPAPR_MACHINE(2_8, "2.8", false);
3394 
3395 /*
3396  * pseries-2.7
3397  */
3398 #define SPAPR_COMPAT_2_7                            \
3399     HW_COMPAT_2_7                                   \
3400     {                                               \
3401         .driver   = TYPE_SPAPR_PCI_HOST_BRIDGE,     \
3402         .property = "mem_win_size",                 \
3403         .value    = stringify(SPAPR_PCI_2_7_MMIO_WIN_SIZE),\
3404     },                                              \
3405     {                                               \
3406         .driver   = TYPE_SPAPR_PCI_HOST_BRIDGE,     \
3407         .property = "mem64_win_size",               \
3408         .value    = "0",                            \
3409     },                                              \
3410     {                                               \
3411         .driver = TYPE_POWERPC_CPU,                 \
3412         .property = "pre-2.8-migration",            \
3413         .value    = "on",                           \
3414     },                                              \
3415     {                                               \
3416         .driver = TYPE_SPAPR_PCI_HOST_BRIDGE,       \
3417         .property = "pre-2.8-migration",            \
3418         .value    = "on",                           \
3419     },
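
/*
 * Editor's note: each entry in the compat list above forces a device
 * property back to its pseries-2.7-era default when an older machine
 * type is selected.  In particular, "mem64_win_size" = 0 disables the
 * dedicated 64-bit MMIO window, so the split-window fallback described
 * in phb_placement_2_7() below takes effect.
 */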
3420 
3421 static void phb_placement_2_7(sPAPRMachineState *spapr, uint32_t index,
3422                               uint64_t *buid, hwaddr *pio,
3423                               hwaddr *mmio32, hwaddr *mmio64,
3424                               unsigned n_dma, uint32_t *liobns, Error **errp)
3425 {
3426     /* Legacy PHB placement for pseries-2.7 and earlier machine types */
3427     const uint64_t base_buid = 0x800000020000000ULL;
3428     const hwaddr phb_spacing = 0x1000000000ULL; /* 64 GiB */
3429     const hwaddr mmio_offset = 0xa0000000; /* 2 GiB + 512 MiB */
3430     const hwaddr pio_offset = 0x80000000; /* 2 GiB */
3431     const uint32_t max_index = 255;
3432     const hwaddr phb0_alignment = 0x10000000000ULL; /* 1 TiB */
3433 
3434     uint64_t ram_top = MACHINE(spapr)->ram_size;
3435     hwaddr phb0_base, phb_base;
3436     int i;
3437 
3438     /* Do we have hotpluggable memory? */
3439     if (MACHINE(spapr)->maxram_size > ram_top) {
3440         /* Can't just use maxram_size, because there may be an
3441          * alignment gap between normal and hotpluggable memory
3442          * regions */
3443         ram_top = spapr->hotplug_memory.base +
3444             memory_region_size(&spapr->hotplug_memory.mr);
3445     }
3446 
3447     phb0_base = QEMU_ALIGN_UP(ram_top, phb0_alignment);
3448 
3449     if (index > max_index) {
3450         error_setg(errp, "\"index\" for PAPR PHB is too large (max %u)",
3451                    max_index);
3452         return;
3453     }
3454 
3455     *buid = base_buid + index;
3456     for (i = 0; i < n_dma; ++i) {
3457         liobns[i] = SPAPR_PCI_LIOBN(index, i);
3458     }
3459 
3460     phb_base = phb0_base + index * phb_spacing;
3461     *pio = phb_base + pio_offset;
3462     *mmio32 = phb_base + mmio_offset;
3463     /*
3464      * We don't set the 64-bit MMIO window, relying on the PHB's
3465      * fallback behaviour of automatically splitting a large "32-bit"
3466      * window into contiguous 32-bit and 64-bit windows
3467      */
3468 }
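
/*
 * Editor's note (illustration only): a worked example of the legacy
 * placement, assuming ram_size = 4GiB and no hotpluggable memory.
 * Then phb0_base = QEMU_ALIGN_UP(4GiB, 1TiB) = 1TiB, and
 *
 *   index 0: *pio = 1TiB + 2GiB, *mmio32 = 1TiB + 2.5GiB
 *   index 1: *pio = 1TiB + 64GiB + 2GiB, *mmio32 = 1TiB + 64GiB + 2.5GiB
 *
 * i.e. each PHB gets a private 64GiB slab starting at the first 1TiB
 * boundary above the top of RAM, with PIO at +2GiB and 32-bit MMIO at
 * +2.5GiB within its slab.
 */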
3469 
3470 static void spapr_machine_2_7_instance_options(MachineState *machine)
3471 {
3472     sPAPRMachineState *spapr = SPAPR_MACHINE(machine);
3473 
3474     spapr_machine_2_8_instance_options(machine);
3475     spapr->use_hotplug_event_source = false;
3476 }
3477 
3478 static void spapr_machine_2_7_class_options(MachineClass *mc)
3479 {
3480     sPAPRMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
3481 
3482     spapr_machine_2_8_class_options(mc);
3483     smc->tcg_default_cpu = "POWER7";
3484     SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_7);
3485     smc->phb_placement = phb_placement_2_7;
3486 }
3487 
3488 DEFINE_SPAPR_MACHINE(2_7, "2.7", false);
3489 
3490 /*
3491  * pseries-2.6
3492  */
3493 #define SPAPR_COMPAT_2_6 \
3494     HW_COMPAT_2_6 \
3495     { \
3496         .driver   = TYPE_SPAPR_PCI_HOST_BRIDGE,\
3497         .property = "ddw",\
3498         .value    = stringify(off),\
3499     },
3500 
3501 static void spapr_machine_2_6_instance_options(MachineState *machine)
3502 {
3503     spapr_machine_2_7_instance_options(machine);
3504 }
3505 
3506 static void spapr_machine_2_6_class_options(MachineClass *mc)
3507 {
3508     spapr_machine_2_7_class_options(mc);
3509     mc->has_hotpluggable_cpus = false;
3510     SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_6);
3511 }
3512 
3513 DEFINE_SPAPR_MACHINE(2_6, "2.6", false);
3514 
3515 /*
3516  * pseries-2.5
3517  */
3518 #define SPAPR_COMPAT_2_5 \
3519     HW_COMPAT_2_5 \
3520     { \
3521         .driver   = "spapr-vlan", \
3522         .property = "use-rx-buffer-pools", \
3523         .value    = "off", \
3524     },
3525 
3526 static void spapr_machine_2_5_instance_options(MachineState *machine)
3527 {
3528     spapr_machine_2_6_instance_options(machine);
3529 }
3530 
3531 static void spapr_machine_2_5_class_options(MachineClass *mc)
3532 {
3533     sPAPRMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
3534 
3535     spapr_machine_2_6_class_options(mc);
3536     smc->use_ohci_by_default = true;
3537     SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_5);
3538 }
3539 
3540 DEFINE_SPAPR_MACHINE(2_5, "2.5", false);
3541 
3542 /*
3543  * pseries-2.4
3544  */
3545 #define SPAPR_COMPAT_2_4 \
3546         HW_COMPAT_2_4
3547 
3548 static void spapr_machine_2_4_instance_options(MachineState *machine)
3549 {
3550     spapr_machine_2_5_instance_options(machine);
3551 }
3552 
3553 static void spapr_machine_2_4_class_options(MachineClass *mc)
3554 {
3555     sPAPRMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
3556 
3557     spapr_machine_2_5_class_options(mc);
3558     smc->dr_lmb_enabled = false;
3559     SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_4);
3560 }
3561 
3562 DEFINE_SPAPR_MACHINE(2_4, "2.4", false);
3563 
3564 /*
3565  * pseries-2.3
3566  */
3567 #define SPAPR_COMPAT_2_3 \
3568         HW_COMPAT_2_3 \
3569         {\
3570             .driver   = "spapr-pci-host-bridge",\
3571             .property = "dynamic-reconfiguration",\
3572             .value    = "off",\
3573         },
3574 
3575 static void spapr_machine_2_3_instance_options(MachineState *machine)
3576 {
3577     spapr_machine_2_4_instance_options(machine);
3578     savevm_skip_section_footers();
3579     global_state_set_optional();
3580     savevm_skip_configuration();
3581 }
3582 
3583 static void spapr_machine_2_3_class_options(MachineClass *mc)
3584 {
3585     spapr_machine_2_4_class_options(mc);
3586     SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_3);
3587 }
3588 DEFINE_SPAPR_MACHINE(2_3, "2.3", false);
3589 
3590 /*
3591  * pseries-2.2
3592  */
3593 
3594 #define SPAPR_COMPAT_2_2 \
3595         HW_COMPAT_2_2 \
3596         {\
3597             .driver   = TYPE_SPAPR_PCI_HOST_BRIDGE,\
3598             .property = "mem_win_size",\
3599             .value    = "0x20000000",\
3600         },
3601 
3602 static void spapr_machine_2_2_instance_options(MachineState *machine)
3603 {
3604     spapr_machine_2_3_instance_options(machine);
3605     machine->suppress_vmdesc = true;
3606 }
3607 
3608 static void spapr_machine_2_2_class_options(MachineClass *mc)
3609 {
3610     spapr_machine_2_3_class_options(mc);
3611     SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_2);
3612 }
3613 DEFINE_SPAPR_MACHINE(2_2, "2.2", false);
3614 
3615 /*
3616  * pseries-2.1
3617  */
3618 #define SPAPR_COMPAT_2_1 \
3619         HW_COMPAT_2_1
3620 
3621 static void spapr_machine_2_1_instance_options(MachineState *machine)
3622 {
3623     spapr_machine_2_2_instance_options(machine);
3624 }
3625 
3626 static void spapr_machine_2_1_class_options(MachineClass *mc)
3627 {
3628     spapr_machine_2_2_class_options(mc);
3629     SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_1);
3630 }
3631 DEFINE_SPAPR_MACHINE(2_1, "2.1", false);
3632 
3633 static void spapr_machine_register_types(void)
3634 {
3635     type_register_static(&spapr_machine_info);
3636 }
3637 
3638 type_init(spapr_machine_register_types)
3639