153018216SPaolo Bonzini /* 253018216SPaolo Bonzini * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator 353018216SPaolo Bonzini * 453018216SPaolo Bonzini * Copyright (c) 2004-2007 Fabrice Bellard 553018216SPaolo Bonzini * Copyright (c) 2007 Jocelyn Mayer 653018216SPaolo Bonzini * Copyright (c) 2010 David Gibson, IBM Corporation. 753018216SPaolo Bonzini * 853018216SPaolo Bonzini * Permission is hereby granted, free of charge, to any person obtaining a copy 953018216SPaolo Bonzini * of this software and associated documentation files (the "Software"), to deal 1053018216SPaolo Bonzini * in the Software without restriction, including without limitation the rights 1153018216SPaolo Bonzini * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 1253018216SPaolo Bonzini * copies of the Software, and to permit persons to whom the Software is 1353018216SPaolo Bonzini * furnished to do so, subject to the following conditions: 1453018216SPaolo Bonzini * 1553018216SPaolo Bonzini * The above copyright notice and this permission notice shall be included in 1653018216SPaolo Bonzini * all copies or substantial portions of the Software. 1753018216SPaolo Bonzini * 1853018216SPaolo Bonzini * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 1953018216SPaolo Bonzini * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 2053018216SPaolo Bonzini * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 2153018216SPaolo Bonzini * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 2253018216SPaolo Bonzini * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 2353018216SPaolo Bonzini * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 2453018216SPaolo Bonzini * THE SOFTWARE. 
2553018216SPaolo Bonzini */ 26a8d25326SMarkus Armbruster 270d75590dSPeter Maydell #include "qemu/osdep.h" 282c65db5eSPaolo Bonzini #include "qemu/datadir.h" 295df022cfSPeter Maydell #include "qemu/memalign.h" 30c4b07531SJason A. Donenfeld #include "qemu/guest-random.h" 31da34e65cSMarkus Armbruster #include "qapi/error.h" 32eb7f80fdSDaniel Henrique Barboza #include "qapi/qapi-events-machine.h" 334b08cd56SDaniel Henrique Barboza #include "qapi/qapi-events-qdev.h" 34fa98fbfcSSam Bobroff #include "qapi/visitor.h" 3553018216SPaolo Bonzini #include "sysemu/sysemu.h" 36b58c5c2dSMarkus Armbruster #include "sysemu/hostmem.h" 37e35704baSEduardo Habkost #include "sysemu/numa.h" 3823ff81bdSGreg Kurz #include "sysemu/qtest.h" 3971e8a915SMarkus Armbruster #include "sysemu/reset.h" 4054d31236SMarkus Armbruster #include "sysemu/runstate.h" 4103dd024fSPaolo Bonzini #include "qemu/log.h" 4271461b0fSAlexey Kardashevskiy #include "hw/fw-path-provider.h" 4353018216SPaolo Bonzini #include "elf.h" 4453018216SPaolo Bonzini #include "net/net.h" 45ad440b4aSAndrew Jones #include "sysemu/device_tree.h" 4653018216SPaolo Bonzini #include "sysemu/cpus.h" 47b3946626SVincent Palatin #include "sysemu/hw_accel.h" 4853018216SPaolo Bonzini #include "kvm_ppc.h" 49c4b63b7cSJuan Quintela #include "migration/misc.h" 50ca77ee28SMarkus Armbruster #include "migration/qemu-file-types.h" 5184a899deSJuan Quintela #include "migration/global_state.h" 52f2a8f0a6SJuan Quintela #include "migration/register.h" 532500fb42SAravinda Prasad #include "migration/blocker.h" 544be21d56SDavid Gibson #include "mmu-hash64.h" 55b4db5413SSuraj Jitindar Singh #include "mmu-book3s-v3.h" 567abd43baSSuraj Jitindar Singh #include "cpu-models.h" 572e5b09fdSMarkus Armbruster #include "hw/core/cpu.h" 5853018216SPaolo Bonzini 590d09e41aSPaolo Bonzini #include "hw/ppc/ppc.h" 6053018216SPaolo Bonzini #include "hw/loader.h" 6153018216SPaolo Bonzini 627804c353SCédric Le Goater #include "hw/ppc/fdt.h" 630d09e41aSPaolo Bonzini #include 
"hw/ppc/spapr.h" 640d09e41aSPaolo Bonzini #include "hw/ppc/spapr_vio.h" 6546d80a56SPhilippe Mathieu-Daudé #include "hw/ppc/vof.h" 66a27bd6c7SMarkus Armbruster #include "hw/qdev-properties.h" 670d09e41aSPaolo Bonzini #include "hw/pci-host/spapr.h" 6853018216SPaolo Bonzini #include "hw/pci/msi.h" 6953018216SPaolo Bonzini 7053018216SPaolo Bonzini #include "hw/pci/pci.h" 7171461b0fSAlexey Kardashevskiy #include "hw/scsi/scsi.h" 7271461b0fSAlexey Kardashevskiy #include "hw/virtio/virtio-scsi.h" 73c4e13492SFelipe Franciosi #include "hw/virtio/vhost-scsi-common.h" 7453018216SPaolo Bonzini 752309832aSDavid Gibson #include "exec/ram_addr.h" 7653018216SPaolo Bonzini #include "hw/usb.h" 7753018216SPaolo Bonzini #include "qemu/config-file.h" 78135a129aSAneesh Kumar K.V #include "qemu/error-report.h" 792a6593cbSAlexey Kardashevskiy #include "trace.h" 8034316482SAlexey Kardashevskiy #include "hw/nmi.h" 816449da45SCédric Le Goater #include "hw/intc/intc.h" 8253018216SPaolo Bonzini 8394a94e4cSBharata B Rao #include "hw/ppc/spapr_cpu_core.h" 842cc0e2e8SDavid Hildenbrand #include "hw/mem/memory-device.h" 850fb6bd07SMichael Roth #include "hw/ppc/spapr_tpm_proxy.h" 86ee3a71e3SShivaprasad G Bhat #include "hw/ppc/spapr_nvdimm.h" 871eee9950SDaniel Henrique Barboza #include "hw/ppc/spapr_numa.h" 886c8ebe30SDavid Gibson #include "hw/ppc/pef.h" 8968a27b20SMichael S. 
Tsirkin 90f041d6afSGreg Kurz #include "monitor/monitor.h" 91f041d6afSGreg Kurz 9253018216SPaolo Bonzini #include <libfdt.h> 9353018216SPaolo Bonzini 9453018216SPaolo Bonzini /* SLOF memory layout: 9553018216SPaolo Bonzini * 9653018216SPaolo Bonzini * SLOF raw image loaded at 0, copies its romfs right below the flat 9753018216SPaolo Bonzini * device-tree, then position SLOF itself 31M below that 9853018216SPaolo Bonzini * 9953018216SPaolo Bonzini * So we set FW_OVERHEAD to 40MB which should account for all of that 10053018216SPaolo Bonzini * and more 10153018216SPaolo Bonzini * 10253018216SPaolo Bonzini * We load our kernel at 4M, leaving space for SLOF initial image 10353018216SPaolo Bonzini */ 1044b98e72dSAlexey Kardashevskiy #define FDT_MAX_ADDR 0x80000000 /* FDT must stay below that */ 10553018216SPaolo Bonzini #define FW_MAX_SIZE 0x400000 10653018216SPaolo Bonzini #define FW_FILE_NAME "slof.bin" 107fc8c745dSAlexey Kardashevskiy #define FW_FILE_NAME_VOF "vof.bin" 10853018216SPaolo Bonzini #define FW_OVERHEAD 0x2800000 10953018216SPaolo Bonzini #define KERNEL_LOAD_ADDR FW_MAX_SIZE 11053018216SPaolo Bonzini 1119943266eSDavid Gibson #define MIN_RMA_SLOF (128 * MiB) 11253018216SPaolo Bonzini 1135c7adcf4SGreg Kurz #define PHANDLE_INTC 0x00001111 11453018216SPaolo Bonzini 1155d0fb150SGreg Kurz /* These two functions implement the VCPU id numbering: one to compute them 1165d0fb150SGreg Kurz * all and one to identify thread 0 of a VCORE. Any change to the first one 1175d0fb150SGreg Kurz * is likely to have an impact on the second one, so let's keep them close. 
1185d0fb150SGreg Kurz */ 119ce2918cbSDavid Gibson static int spapr_vcpu_id(SpaprMachineState *spapr, int cpu_index) 1205d0fb150SGreg Kurz { 121fe6b6346SLike Xu MachineState *ms = MACHINE(spapr); 122fe6b6346SLike Xu unsigned int smp_threads = ms->smp.threads; 123fe6b6346SLike Xu 1241a5008fcSGreg Kurz assert(spapr->vsmt); 1255d0fb150SGreg Kurz return 1265d0fb150SGreg Kurz (cpu_index / smp_threads) * spapr->vsmt + cpu_index % smp_threads; 1275d0fb150SGreg Kurz } 128ce2918cbSDavid Gibson static bool spapr_is_thread0_in_vcore(SpaprMachineState *spapr, 1295d0fb150SGreg Kurz PowerPCCPU *cpu) 1305d0fb150SGreg Kurz { 1311a5008fcSGreg Kurz assert(spapr->vsmt); 1325d0fb150SGreg Kurz return spapr_get_vcpu_id(cpu) % spapr->vsmt == 0; 1335d0fb150SGreg Kurz } 1345d0fb150SGreg Kurz 13546f7afa3SGreg Kurz static bool pre_2_10_vmstate_dummy_icp_needed(void *opaque) 13646f7afa3SGreg Kurz { 13746f7afa3SGreg Kurz /* Dummy entries correspond to unused ICPState objects in older QEMUs, 13846f7afa3SGreg Kurz * and newer QEMUs don't even have them. In both cases, we don't want 13946f7afa3SGreg Kurz * to send anything on the wire. 
14046f7afa3SGreg Kurz */ 14146f7afa3SGreg Kurz return false; 14246f7afa3SGreg Kurz } 14346f7afa3SGreg Kurz 14446f7afa3SGreg Kurz static const VMStateDescription pre_2_10_vmstate_dummy_icp = { 14546f7afa3SGreg Kurz .name = "icp/server", 14646f7afa3SGreg Kurz .version_id = 1, 14746f7afa3SGreg Kurz .minimum_version_id = 1, 14846f7afa3SGreg Kurz .needed = pre_2_10_vmstate_dummy_icp_needed, 14946f7afa3SGreg Kurz .fields = (VMStateField[]) { 15046f7afa3SGreg Kurz VMSTATE_UNUSED(4), /* uint32_t xirr */ 15146f7afa3SGreg Kurz VMSTATE_UNUSED(1), /* uint8_t pending_priority */ 15246f7afa3SGreg Kurz VMSTATE_UNUSED(1), /* uint8_t mfrr */ 15346f7afa3SGreg Kurz VMSTATE_END_OF_LIST() 15446f7afa3SGreg Kurz }, 15546f7afa3SGreg Kurz }; 15646f7afa3SGreg Kurz 15746f7afa3SGreg Kurz static void pre_2_10_vmstate_register_dummy_icp(int i) 15846f7afa3SGreg Kurz { 15946f7afa3SGreg Kurz vmstate_register(NULL, i, &pre_2_10_vmstate_dummy_icp, 16046f7afa3SGreg Kurz (void *)(uintptr_t) i); 16146f7afa3SGreg Kurz } 16246f7afa3SGreg Kurz 16346f7afa3SGreg Kurz static void pre_2_10_vmstate_unregister_dummy_icp(int i) 16446f7afa3SGreg Kurz { 16546f7afa3SGreg Kurz vmstate_unregister(NULL, &pre_2_10_vmstate_dummy_icp, 16646f7afa3SGreg Kurz (void *)(uintptr_t) i); 16746f7afa3SGreg Kurz } 16846f7afa3SGreg Kurz 169ce2918cbSDavid Gibson int spapr_max_server_number(SpaprMachineState *spapr) 17046f7afa3SGreg Kurz { 171fe6b6346SLike Xu MachineState *ms = MACHINE(spapr); 172fe6b6346SLike Xu 1731a5008fcSGreg Kurz assert(spapr->vsmt); 174fe6b6346SLike Xu return DIV_ROUND_UP(ms->smp.max_cpus * spapr->vsmt, ms->smp.threads); 17546f7afa3SGreg Kurz } 17646f7afa3SGreg Kurz 177833d4668SAlexey Kardashevskiy static int spapr_fixup_cpu_smt_dt(void *fdt, int offset, PowerPCCPU *cpu, 178833d4668SAlexey Kardashevskiy int smt_threads) 179833d4668SAlexey Kardashevskiy { 180833d4668SAlexey Kardashevskiy int i, ret = 0; 181a580fdcdSPhilippe Mathieu-Daudé g_autofree uint32_t *servers_prop = g_new(uint32_t, smt_threads); 
182a580fdcdSPhilippe Mathieu-Daudé g_autofree uint32_t *gservers_prop = g_new(uint32_t, smt_threads * 2); 18314bb4486SGreg Kurz int index = spapr_get_vcpu_id(cpu); 184833d4668SAlexey Kardashevskiy 185d6e166c0SDavid Gibson if (cpu->compat_pvr) { 186d6e166c0SDavid Gibson ret = fdt_setprop_cell(fdt, offset, "cpu-version", cpu->compat_pvr); 1876d9412eaSAlexey Kardashevskiy if (ret < 0) { 1886d9412eaSAlexey Kardashevskiy return ret; 1896d9412eaSAlexey Kardashevskiy } 1906d9412eaSAlexey Kardashevskiy } 1916d9412eaSAlexey Kardashevskiy 192833d4668SAlexey Kardashevskiy /* Build interrupt servers and gservers properties */ 193833d4668SAlexey Kardashevskiy for (i = 0; i < smt_threads; i++) { 194833d4668SAlexey Kardashevskiy servers_prop[i] = cpu_to_be32(index + i); 195833d4668SAlexey Kardashevskiy /* Hack, direct the group queues back to cpu 0 */ 196833d4668SAlexey Kardashevskiy gservers_prop[i*2] = cpu_to_be32(index + i); 197833d4668SAlexey Kardashevskiy gservers_prop[i*2 + 1] = 0; 198833d4668SAlexey Kardashevskiy } 199833d4668SAlexey Kardashevskiy ret = fdt_setprop(fdt, offset, "ibm,ppc-interrupt-server#s", 200a580fdcdSPhilippe Mathieu-Daudé servers_prop, sizeof(*servers_prop) * smt_threads); 201833d4668SAlexey Kardashevskiy if (ret < 0) { 202833d4668SAlexey Kardashevskiy return ret; 203833d4668SAlexey Kardashevskiy } 204833d4668SAlexey Kardashevskiy ret = fdt_setprop(fdt, offset, "ibm,ppc-interrupt-gserver#s", 205a580fdcdSPhilippe Mathieu-Daudé gservers_prop, sizeof(*gservers_prop) * smt_threads * 2); 206833d4668SAlexey Kardashevskiy 207833d4668SAlexey Kardashevskiy return ret; 208833d4668SAlexey Kardashevskiy } 209833d4668SAlexey Kardashevskiy 21091335a5eSDavid Gibson static void spapr_dt_pa_features(SpaprMachineState *spapr, 211ee76a09fSDavid Gibson PowerPCCPU *cpu, 212daa36379SDavid Gibson void *fdt, int offset) 21386d5771aSSam Bobroff { 21486d5771aSSam Bobroff uint8_t pa_features_206[] = { 6, 0, 21586d5771aSSam Bobroff 0xf6, 0x1f, 0xc7, 0x00, 0x80, 0xc0 }; 
21686d5771aSSam Bobroff uint8_t pa_features_207[] = { 24, 0, 21786d5771aSSam Bobroff 0xf6, 0x1f, 0xc7, 0xc0, 0x80, 0xf0, 21886d5771aSSam Bobroff 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 21986d5771aSSam Bobroff 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 22086d5771aSSam Bobroff 0x80, 0x00, 0x80, 0x00, 0x00, 0x00 }; 2219fb4541fSSam Bobroff uint8_t pa_features_300[] = { 66, 0, 2229fb4541fSSam Bobroff /* 0: MMU|FPU|SLB|RUN|DABR|NX, 1: fri[nzpm]|DABRX|SPRG3|SLB0|PP110 */ 2239fb4541fSSam Bobroff /* 2: VPM|DS205|PPR|DS202|DS206, 3: LSD|URG, SSO, 5: LE|CFAR|EB|LSQ */ 22486d5771aSSam Bobroff 0xf6, 0x1f, 0xc7, 0xc0, 0x80, 0xf0, /* 0 - 5 */ 2259fb4541fSSam Bobroff /* 6: DS207 */ 22686d5771aSSam Bobroff 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, /* 6 - 11 */ 2279fb4541fSSam Bobroff /* 16: Vector */ 22886d5771aSSam Bobroff 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, /* 12 - 17 */ 2299fb4541fSSam Bobroff /* 18: Vec. Scalar, 20: Vec. XOR, 22: HTM */ 2309bf502feSDavid Gibson 0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 18 - 23 */ 2319fb4541fSSam Bobroff /* 24: Ext. 
Dec, 26: 64 bit ftrs, 28: PM ftrs */ 2329fb4541fSSam Bobroff 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 24 - 29 */ 2339fb4541fSSam Bobroff /* 30: MMR, 32: LE atomic, 34: EBB + ext EBB */ 2349fb4541fSSam Bobroff 0x80, 0x00, 0x80, 0x00, 0xC0, 0x00, /* 30 - 35 */ 2359fb4541fSSam Bobroff /* 36: SPR SO, 38: Copy/Paste, 40: Radix MMU */ 2369fb4541fSSam Bobroff 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 36 - 41 */ 2379fb4541fSSam Bobroff /* 42: PM, 44: PC RA, 46: SC vec'd */ 2389fb4541fSSam Bobroff 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 42 - 47 */ 2399fb4541fSSam Bobroff /* 48: SIMD, 50: QP BFP, 52: String */ 2409fb4541fSSam Bobroff 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 48 - 53 */ 2419fb4541fSSam Bobroff /* 54: DecFP, 56: DecI, 58: SHA */ 2429fb4541fSSam Bobroff 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 54 - 59 */ 2439fb4541fSSam Bobroff /* 60: NM atomic, 62: RNG */ 2449fb4541fSSam Bobroff 0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 60 - 65 */ 2459fb4541fSSam Bobroff }; 2467abd43baSSuraj Jitindar Singh uint8_t *pa_features = NULL; 24786d5771aSSam Bobroff size_t pa_size; 24886d5771aSSam Bobroff 2497abd43baSSuraj Jitindar Singh if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_2_06, 0, cpu->compat_pvr)) { 25086d5771aSSam Bobroff pa_features = pa_features_206; 25186d5771aSSam Bobroff pa_size = sizeof(pa_features_206); 2527abd43baSSuraj Jitindar Singh } 2537abd43baSSuraj Jitindar Singh if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_2_07, 0, cpu->compat_pvr)) { 25486d5771aSSam Bobroff pa_features = pa_features_207; 25586d5771aSSam Bobroff pa_size = sizeof(pa_features_207); 2567abd43baSSuraj Jitindar Singh } 2577abd43baSSuraj Jitindar Singh if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_00, 0, cpu->compat_pvr)) { 25886d5771aSSam Bobroff pa_features = pa_features_300; 25986d5771aSSam Bobroff pa_size = sizeof(pa_features_300); 2607abd43baSSuraj Jitindar Singh } 2617abd43baSSuraj Jitindar Singh if (!pa_features) { 26286d5771aSSam Bobroff return; 26386d5771aSSam Bobroff } 26486d5771aSSam Bobroff 
26526cd35b8SDavid Gibson if (ppc_hash64_has(cpu, PPC_HASH64_CI_LARGEPAGE)) { 26686d5771aSSam Bobroff /* 26786d5771aSSam Bobroff * Note: we keep CI large pages off by default because a 64K capable 26886d5771aSSam Bobroff * guest provisioned with large pages might otherwise try to map a qemu 26986d5771aSSam Bobroff * framebuffer (or other kind of memory mapped PCI BAR) using 64K pages 27086d5771aSSam Bobroff * even if that qemu runs on a 4k host. 27186d5771aSSam Bobroff * We dd this bit back here if we are confident this is not an issue 27286d5771aSSam Bobroff */ 27386d5771aSSam Bobroff pa_features[3] |= 0x20; 27486d5771aSSam Bobroff } 2754e5fe368SSuraj Jitindar Singh if ((spapr_get_cap(spapr, SPAPR_CAP_HTM) != 0) && pa_size > 24) { 27686d5771aSSam Bobroff pa_features[24] |= 0x80; /* Transactional memory support */ 27786d5771aSSam Bobroff } 278daa36379SDavid Gibson if (spapr->cas_pre_isa3_guest && pa_size > 40) { 279e957f6a9SSam Bobroff /* Workaround for broken kernels that attempt (guest) radix 280e957f6a9SSam Bobroff * mode when they can't handle it, if they see the radix bit set 281e957f6a9SSam Bobroff * in pa-features. So hide it from them. 
*/ 282e957f6a9SSam Bobroff pa_features[40 + 2] &= ~0x80; /* Radix MMU */ 283e957f6a9SSam Bobroff } 28486d5771aSSam Bobroff 28586d5771aSSam Bobroff _FDT((fdt_setprop(fdt, offset, "ibm,pa-features", pa_features, pa_size))); 28686d5771aSSam Bobroff } 28786d5771aSSam Bobroff 288c86c1affSDaniel Henrique Barboza static hwaddr spapr_node0_size(MachineState *machine) 289b082d65aSAlexey Kardashevskiy { 290aa570207STao Xu if (machine->numa_state->num_nodes) { 291b082d65aSAlexey Kardashevskiy int i; 292aa570207STao Xu for (i = 0; i < machine->numa_state->num_nodes; ++i) { 2937e721e7bSTao Xu if (machine->numa_state->nodes[i].node_mem) { 2947e721e7bSTao Xu return MIN(pow2floor(machine->numa_state->nodes[i].node_mem), 295fb164994SDavid Gibson machine->ram_size); 296b082d65aSAlexey Kardashevskiy } 297b082d65aSAlexey Kardashevskiy } 298b082d65aSAlexey Kardashevskiy } 299fb164994SDavid Gibson return machine->ram_size; 300b082d65aSAlexey Kardashevskiy } 301b082d65aSAlexey Kardashevskiy 302a1d59c0fSAlexey Kardashevskiy static void add_str(GString *s, const gchar *s1) 303a1d59c0fSAlexey Kardashevskiy { 304a1d59c0fSAlexey Kardashevskiy g_string_append_len(s, s1, strlen(s1) + 1); 305a1d59c0fSAlexey Kardashevskiy } 30653018216SPaolo Bonzini 307f1aa45ffSDaniel Henrique Barboza static int spapr_dt_memory_node(SpaprMachineState *spapr, void *fdt, int nodeid, 308f1aa45ffSDaniel Henrique Barboza hwaddr start, hwaddr size) 30926a8c353SAlexey Kardashevskiy { 31026a8c353SAlexey Kardashevskiy char mem_name[32]; 31126a8c353SAlexey Kardashevskiy uint64_t mem_reg_property[2]; 31226a8c353SAlexey Kardashevskiy int off; 31326a8c353SAlexey Kardashevskiy 31426a8c353SAlexey Kardashevskiy mem_reg_property[0] = cpu_to_be64(start); 31526a8c353SAlexey Kardashevskiy mem_reg_property[1] = cpu_to_be64(size); 31626a8c353SAlexey Kardashevskiy 3173a17e38fSAlexey Kardashevskiy sprintf(mem_name, "memory@%" HWADDR_PRIx, start); 31826a8c353SAlexey Kardashevskiy off = fdt_add_subnode(fdt, 0, mem_name); 
31926a8c353SAlexey Kardashevskiy _FDT(off); 32026a8c353SAlexey Kardashevskiy _FDT((fdt_setprop_string(fdt, off, "device_type", "memory"))); 32126a8c353SAlexey Kardashevskiy _FDT((fdt_setprop(fdt, off, "reg", mem_reg_property, 32226a8c353SAlexey Kardashevskiy sizeof(mem_reg_property)))); 323f1aa45ffSDaniel Henrique Barboza spapr_numa_write_associativity_dt(spapr, fdt, off, nodeid); 32403d196b7SBharata B Rao return off; 32526a8c353SAlexey Kardashevskiy } 32626a8c353SAlexey Kardashevskiy 327f47bd1c8SIgor Mammedov static uint32_t spapr_pc_dimm_node(MemoryDeviceInfoList *list, ram_addr_t addr) 328f47bd1c8SIgor Mammedov { 329f47bd1c8SIgor Mammedov MemoryDeviceInfoList *info; 330f47bd1c8SIgor Mammedov 331f47bd1c8SIgor Mammedov for (info = list; info; info = info->next) { 332f47bd1c8SIgor Mammedov MemoryDeviceInfo *value = info->value; 333f47bd1c8SIgor Mammedov 334f47bd1c8SIgor Mammedov if (value && value->type == MEMORY_DEVICE_INFO_KIND_DIMM) { 335f47bd1c8SIgor Mammedov PCDIMMDeviceInfo *pcdimm_info = value->u.dimm.data; 336f47bd1c8SIgor Mammedov 337ccc2cef8SDavid Gibson if (addr >= pcdimm_info->addr && 338f47bd1c8SIgor Mammedov addr < (pcdimm_info->addr + pcdimm_info->size)) { 339f47bd1c8SIgor Mammedov return pcdimm_info->node; 340f47bd1c8SIgor Mammedov } 341f47bd1c8SIgor Mammedov } 342f47bd1c8SIgor Mammedov } 343f47bd1c8SIgor Mammedov 344f47bd1c8SIgor Mammedov return -1; 345f47bd1c8SIgor Mammedov } 346f47bd1c8SIgor Mammedov 347a324d6f1SBharata B Rao struct sPAPRDrconfCellV2 { 348a324d6f1SBharata B Rao uint32_t seq_lmbs; 349a324d6f1SBharata B Rao uint64_t base_addr; 350a324d6f1SBharata B Rao uint32_t drc_index; 351a324d6f1SBharata B Rao uint32_t aa_index; 352a324d6f1SBharata B Rao uint32_t flags; 353a324d6f1SBharata B Rao } QEMU_PACKED; 354a324d6f1SBharata B Rao 355a324d6f1SBharata B Rao typedef struct DrconfCellQueue { 356a324d6f1SBharata B Rao struct sPAPRDrconfCellV2 cell; 357a324d6f1SBharata B Rao QSIMPLEQ_ENTRY(DrconfCellQueue) entry; 358a324d6f1SBharata B Rao } 
DrconfCellQueue; 359a324d6f1SBharata B Rao 360a324d6f1SBharata B Rao static DrconfCellQueue * 361a324d6f1SBharata B Rao spapr_get_drconf_cell(uint32_t seq_lmbs, uint64_t base_addr, 362a324d6f1SBharata B Rao uint32_t drc_index, uint32_t aa_index, 363a324d6f1SBharata B Rao uint32_t flags) 36403d196b7SBharata B Rao { 365a324d6f1SBharata B Rao DrconfCellQueue *elem; 366a324d6f1SBharata B Rao 367a324d6f1SBharata B Rao elem = g_malloc0(sizeof(*elem)); 368a324d6f1SBharata B Rao elem->cell.seq_lmbs = cpu_to_be32(seq_lmbs); 369a324d6f1SBharata B Rao elem->cell.base_addr = cpu_to_be64(base_addr); 370a324d6f1SBharata B Rao elem->cell.drc_index = cpu_to_be32(drc_index); 371a324d6f1SBharata B Rao elem->cell.aa_index = cpu_to_be32(aa_index); 372a324d6f1SBharata B Rao elem->cell.flags = cpu_to_be32(flags); 373a324d6f1SBharata B Rao 374a324d6f1SBharata B Rao return elem; 375a324d6f1SBharata B Rao } 376a324d6f1SBharata B Rao 37791335a5eSDavid Gibson static int spapr_dt_dynamic_memory_v2(SpaprMachineState *spapr, void *fdt, 378a324d6f1SBharata B Rao int offset, MemoryDeviceInfoList *dimms) 3792a6593cbSAlexey Kardashevskiy { 3802a6593cbSAlexey Kardashevskiy MachineState *machine = MACHINE(spapr); 381cc941111SFabiano Rosas uint8_t *int_buf, *cur_index; 382a324d6f1SBharata B Rao int ret; 38303d196b7SBharata B Rao uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE; 384a324d6f1SBharata B Rao uint64_t addr, cur_addr, size; 385b0c14ec4SDavid Hildenbrand uint32_t nr_boot_lmbs = (machine->device_memory->base / lmb_size); 386b0c14ec4SDavid Hildenbrand uint64_t mem_end = machine->device_memory->base + 387b0c14ec4SDavid Hildenbrand memory_region_size(&machine->device_memory->mr); 388cc941111SFabiano Rosas uint32_t node, buf_len, nr_entries = 0; 389ce2918cbSDavid Gibson SpaprDrc *drc; 390a324d6f1SBharata B Rao DrconfCellQueue *elem, *next; 391a324d6f1SBharata B Rao MemoryDeviceInfoList *info; 392a324d6f1SBharata B Rao QSIMPLEQ_HEAD(, DrconfCellQueue) drconf_queue 393a324d6f1SBharata B Rao = 
QSIMPLEQ_HEAD_INITIALIZER(drconf_queue); 394a324d6f1SBharata B Rao 395a324d6f1SBharata B Rao /* Entry to cover RAM and the gap area */ 396a324d6f1SBharata B Rao elem = spapr_get_drconf_cell(nr_boot_lmbs, 0, 0, -1, 397a324d6f1SBharata B Rao SPAPR_LMB_FLAGS_RESERVED | 398a324d6f1SBharata B Rao SPAPR_LMB_FLAGS_DRC_INVALID); 399a324d6f1SBharata B Rao QSIMPLEQ_INSERT_TAIL(&drconf_queue, elem, entry); 400a324d6f1SBharata B Rao nr_entries++; 401a324d6f1SBharata B Rao 402b0c14ec4SDavid Hildenbrand cur_addr = machine->device_memory->base; 403a324d6f1SBharata B Rao for (info = dimms; info; info = info->next) { 404a324d6f1SBharata B Rao PCDIMMDeviceInfo *di = info->value->u.dimm.data; 405a324d6f1SBharata B Rao 406a324d6f1SBharata B Rao addr = di->addr; 407a324d6f1SBharata B Rao size = di->size; 408a324d6f1SBharata B Rao node = di->node; 409a324d6f1SBharata B Rao 410ee3a71e3SShivaprasad G Bhat /* 411ee3a71e3SShivaprasad G Bhat * The NVDIMM area is hotpluggable after the NVDIMM is unplugged. The 412ee3a71e3SShivaprasad G Bhat * area is marked hotpluggable in the next iteration for the bigger 413ee3a71e3SShivaprasad G Bhat * chunk including the NVDIMM occupied area. 
414ee3a71e3SShivaprasad G Bhat */ 415ee3a71e3SShivaprasad G Bhat if (info->value->type == MEMORY_DEVICE_INFO_KIND_NVDIMM) 416ee3a71e3SShivaprasad G Bhat continue; 417ee3a71e3SShivaprasad G Bhat 418a324d6f1SBharata B Rao /* Entry for hot-pluggable area */ 419a324d6f1SBharata B Rao if (cur_addr < addr) { 420a324d6f1SBharata B Rao drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, cur_addr / lmb_size); 421a324d6f1SBharata B Rao g_assert(drc); 422a324d6f1SBharata B Rao elem = spapr_get_drconf_cell((addr - cur_addr) / lmb_size, 423a324d6f1SBharata B Rao cur_addr, spapr_drc_index(drc), -1, 0); 424a324d6f1SBharata B Rao QSIMPLEQ_INSERT_TAIL(&drconf_queue, elem, entry); 425a324d6f1SBharata B Rao nr_entries++; 426a324d6f1SBharata B Rao } 427a324d6f1SBharata B Rao 428a324d6f1SBharata B Rao /* Entry for DIMM */ 429a324d6f1SBharata B Rao drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, addr / lmb_size); 430a324d6f1SBharata B Rao g_assert(drc); 431a324d6f1SBharata B Rao elem = spapr_get_drconf_cell(size / lmb_size, addr, 432a324d6f1SBharata B Rao spapr_drc_index(drc), node, 4330911a60cSLeonardo Bras (SPAPR_LMB_FLAGS_ASSIGNED | 4340911a60cSLeonardo Bras SPAPR_LMB_FLAGS_HOTREMOVABLE)); 435a324d6f1SBharata B Rao QSIMPLEQ_INSERT_TAIL(&drconf_queue, elem, entry); 436a324d6f1SBharata B Rao nr_entries++; 437a324d6f1SBharata B Rao cur_addr = addr + size; 438a324d6f1SBharata B Rao } 439a324d6f1SBharata B Rao 440a324d6f1SBharata B Rao /* Entry for remaining hotpluggable area */ 441a324d6f1SBharata B Rao if (cur_addr < mem_end) { 442a324d6f1SBharata B Rao drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, cur_addr / lmb_size); 443a324d6f1SBharata B Rao g_assert(drc); 444a324d6f1SBharata B Rao elem = spapr_get_drconf_cell((mem_end - cur_addr) / lmb_size, 445a324d6f1SBharata B Rao cur_addr, spapr_drc_index(drc), -1, 0); 446a324d6f1SBharata B Rao QSIMPLEQ_INSERT_TAIL(&drconf_queue, elem, entry); 447a324d6f1SBharata B Rao nr_entries++; 448a324d6f1SBharata B Rao } 449a324d6f1SBharata B Rao 450a324d6f1SBharata B Rao 
buf_len = nr_entries * sizeof(struct sPAPRDrconfCellV2) + sizeof(uint32_t); 451a324d6f1SBharata B Rao int_buf = cur_index = g_malloc0(buf_len); 452a324d6f1SBharata B Rao *(uint32_t *)int_buf = cpu_to_be32(nr_entries); 453a324d6f1SBharata B Rao cur_index += sizeof(nr_entries); 454a324d6f1SBharata B Rao 455a324d6f1SBharata B Rao QSIMPLEQ_FOREACH_SAFE(elem, &drconf_queue, entry, next) { 456a324d6f1SBharata B Rao memcpy(cur_index, &elem->cell, sizeof(elem->cell)); 457a324d6f1SBharata B Rao cur_index += sizeof(elem->cell); 458a324d6f1SBharata B Rao QSIMPLEQ_REMOVE(&drconf_queue, elem, DrconfCellQueue, entry); 459a324d6f1SBharata B Rao g_free(elem); 460a324d6f1SBharata B Rao } 461a324d6f1SBharata B Rao 462a324d6f1SBharata B Rao ret = fdt_setprop(fdt, offset, "ibm,dynamic-memory-v2", int_buf, buf_len); 463a324d6f1SBharata B Rao g_free(int_buf); 464a324d6f1SBharata B Rao if (ret < 0) { 465a324d6f1SBharata B Rao return -1; 466a324d6f1SBharata B Rao } 467a324d6f1SBharata B Rao return 0; 468a324d6f1SBharata B Rao } 469a324d6f1SBharata B Rao 47091335a5eSDavid Gibson static int spapr_dt_dynamic_memory(SpaprMachineState *spapr, void *fdt, 471a324d6f1SBharata B Rao int offset, MemoryDeviceInfoList *dimms) 472a324d6f1SBharata B Rao { 473b0c14ec4SDavid Hildenbrand MachineState *machine = MACHINE(spapr); 474a324d6f1SBharata B Rao int i, ret; 475a324d6f1SBharata B Rao uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE; 4760c9269a5SDavid Hildenbrand uint32_t device_lmb_start = machine->device_memory->base / lmb_size; 477b0c14ec4SDavid Hildenbrand uint32_t nr_lmbs = (machine->device_memory->base + 478b0c14ec4SDavid Hildenbrand memory_region_size(&machine->device_memory->mr)) / 479d0e5a8f2SBharata B Rao lmb_size; 48003d196b7SBharata B Rao uint32_t *int_buf, *cur_index, buf_len; 48116c25aefSBharata B Rao 48216c25aefSBharata B Rao /* 483ef001f06SThomas Huth * Allocate enough buffer size to fit in ibm,dynamic-memory 484ef001f06SThomas Huth */ 485a324d6f1SBharata B Rao buf_len = (nr_lmbs * 
SPAPR_DR_LMB_LIST_ENTRY_SIZE + 1) * sizeof(uint32_t); 48603d196b7SBharata B Rao cur_index = int_buf = g_malloc0(buf_len); 48703d196b7SBharata B Rao int_buf[0] = cpu_to_be32(nr_lmbs); 48803d196b7SBharata B Rao cur_index++; 48903d196b7SBharata B Rao for (i = 0; i < nr_lmbs; i++) { 490d0e5a8f2SBharata B Rao uint64_t addr = i * lmb_size; 49103d196b7SBharata B Rao uint32_t *dynamic_memory = cur_index; 49203d196b7SBharata B Rao 4930c9269a5SDavid Hildenbrand if (i >= device_lmb_start) { 494ce2918cbSDavid Gibson SpaprDrc *drc; 495d0e5a8f2SBharata B Rao 496fbf55397SDavid Gibson drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, i); 49703d196b7SBharata B Rao g_assert(drc); 49803d196b7SBharata B Rao 49903d196b7SBharata B Rao dynamic_memory[0] = cpu_to_be32(addr >> 32); 50003d196b7SBharata B Rao dynamic_memory[1] = cpu_to_be32(addr & 0xffffffff); 5010b55aa91SDavid Gibson dynamic_memory[2] = cpu_to_be32(spapr_drc_index(drc)); 50203d196b7SBharata B Rao dynamic_memory[3] = cpu_to_be32(0); /* reserved */ 503f47bd1c8SIgor Mammedov dynamic_memory[4] = cpu_to_be32(spapr_pc_dimm_node(dimms, addr)); 504d0e5a8f2SBharata B Rao if (memory_region_present(get_system_memory(), addr)) { 50503d196b7SBharata B Rao dynamic_memory[5] = cpu_to_be32(SPAPR_LMB_FLAGS_ASSIGNED); 50603d196b7SBharata B Rao } else { 50703d196b7SBharata B Rao dynamic_memory[5] = cpu_to_be32(0); 50803d196b7SBharata B Rao } 509d0e5a8f2SBharata B Rao } else { 510d0e5a8f2SBharata B Rao /* 511d0e5a8f2SBharata B Rao * LMB information for RMA, boot time RAM and gap b/n RAM and 5120c9269a5SDavid Hildenbrand * device memory region -- all these are marked as reserved 513d0e5a8f2SBharata B Rao * and as having no valid DRC. 
514d0e5a8f2SBharata B Rao */ 515d0e5a8f2SBharata B Rao dynamic_memory[0] = cpu_to_be32(addr >> 32); 516d0e5a8f2SBharata B Rao dynamic_memory[1] = cpu_to_be32(addr & 0xffffffff); 517d0e5a8f2SBharata B Rao dynamic_memory[2] = cpu_to_be32(0); 518d0e5a8f2SBharata B Rao dynamic_memory[3] = cpu_to_be32(0); /* reserved */ 519d0e5a8f2SBharata B Rao dynamic_memory[4] = cpu_to_be32(-1); 520d0e5a8f2SBharata B Rao dynamic_memory[5] = cpu_to_be32(SPAPR_LMB_FLAGS_RESERVED | 521d0e5a8f2SBharata B Rao SPAPR_LMB_FLAGS_DRC_INVALID); 522d0e5a8f2SBharata B Rao } 52303d196b7SBharata B Rao 52403d196b7SBharata B Rao cur_index += SPAPR_DR_LMB_LIST_ENTRY_SIZE; 52503d196b7SBharata B Rao } 52603d196b7SBharata B Rao ret = fdt_setprop(fdt, offset, "ibm,dynamic-memory", int_buf, buf_len); 527a324d6f1SBharata B Rao g_free(int_buf); 52803d196b7SBharata B Rao if (ret < 0) { 529a324d6f1SBharata B Rao return -1; 530a324d6f1SBharata B Rao } 531a324d6f1SBharata B Rao return 0; 532a324d6f1SBharata B Rao } 533a324d6f1SBharata B Rao 534a324d6f1SBharata B Rao /* 535a324d6f1SBharata B Rao * Adds ibm,dynamic-reconfiguration-memory node. 536a324d6f1SBharata B Rao * Refer to docs/specs/ppc-spapr-hotplug.txt for the documentation 537a324d6f1SBharata B Rao * of this device tree node. 
 */
static int spapr_dt_dynamic_reconfiguration_memory(SpaprMachineState *spapr,
                                                   void *fdt)
{
    MachineState *machine = MACHINE(spapr);
    int ret, offset;
    uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE;
    /* LMB size is exposed as a pair of big-endian 32-bit cells */
    uint32_t prop_lmb_size[] = {cpu_to_be32(lmb_size >> 32),
                                cpu_to_be32(lmb_size & 0xffffffff)};
    MemoryDeviceInfoList *dimms = NULL;

    /*
     * Don't create the node if there is no device memory
     * (i.e. no hotpluggable memory beyond the base RAM).
     */
    if (machine->ram_size == machine->maxram_size) {
        return 0;
    }

    /* NOTE(review): offset is not checked for < 0 here; the following
     * fdt_setprop() calls would then fail and propagate the error. */
    offset = fdt_add_subnode(fdt, 0, "ibm,dynamic-reconfiguration-memory");

    ret = fdt_setprop(fdt, offset, "ibm,lmb-size", prop_lmb_size,
                      sizeof(prop_lmb_size));
    if (ret < 0) {
        return ret;
    }

    ret = fdt_setprop_cell(fdt, offset, "ibm,memory-flags-mask", 0xff);
    if (ret < 0) {
        return ret;
    }

    ret = fdt_setprop_cell(fdt, offset, "ibm,memory-preservation-time", 0x0);
    if (ret < 0) {
        return ret;
    }

    /* ibm,dynamic-memory or ibm,dynamic-memory-v2, depending on what the
     * guest negotiated via CAS (option vector 5, DRMEM_V2 bit) */
    dimms = qmp_memory_device_list();
    if (spapr_ovec_test(spapr->ov5_cas, OV5_DRMEM_V2)) {
        ret = spapr_dt_dynamic_memory_v2(spapr, fdt, offset, dimms);
    } else {
        ret = spapr_dt_dynamic_memory(spapr, fdt, offset, dimms);
    }
    qapi_free_MemoryDeviceInfoList(dimms);

    if (ret < 0) {
        return ret;
    }

    /* ibm,associativity-lookup-arrays for NUMA placement of LMBs */
    ret = spapr_numa_write_assoc_lookup_arrays(spapr, fdt, offset);

    return ret;
}

/*
 * Build the static memory@... nodes, one power-of-two-sized node per
 * chunk of each NUMA node's RAM, then (if CAS-negotiated) the
 * ibm,dynamic-reconfiguration-memory node for hotpluggable memory.
 * Returns 0 on success or a negative libfdt error code.
 */
static int spapr_dt_memory(SpaprMachineState *spapr, void *fdt)
{
    MachineState *machine = MACHINE(spapr);
    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
    hwaddr mem_start, node_size;
    int i, nb_nodes = machine->numa_state->num_nodes;
    NodeInfo *nodes = machine->numa_state->nodes;

    for (i = 0, mem_start = 0; i < nb_nodes; ++i) {
        if (!nodes[i].node_mem) {
            continue;
        }
        if (mem_start >= machine->ram_size) {
            node_size = 0;
        } else {
            node_size = nodes[i].node_mem;
            /* clamp so the per-node chunks never exceed total RAM */
            if (node_size > machine->ram_size - mem_start) {
                node_size = machine->ram_size - mem_start;
            }
        }
        if (!mem_start) {
            /* spapr_machine_init() checks for rma_size <= node0_size
             * already */
            spapr_dt_memory_node(spapr, fdt, i, 0, spapr->rma_size);
            mem_start += spapr->rma_size;
            node_size -= spapr->rma_size;
        }
        /* Emit the remainder as power-of-two-sized, naturally aligned
         * memory nodes */
        for ( ; node_size; ) {
            hwaddr sizetmp = pow2floor(node_size);

            /* mem_start != 0 here */
            if (ctzl(mem_start) < ctzl(sizetmp)) {
                sizetmp = 1ULL << ctzl(mem_start);
            }

            spapr_dt_memory_node(spapr, fdt, i, mem_start, sizetmp);
            node_size -= sizetmp;
            mem_start += sizetmp;
        }
    }

    /* Generate ibm,dynamic-reconfiguration-memory node if required */
    if (spapr_ovec_test(spapr->ov5_cas, OV5_DRCONF_MEMORY)) {
        int ret;

        g_assert(smc->dr_lmb_enabled);
        ret = spapr_dt_dynamic_reconfiguration_memory(spapr, fdt);
        if (ret) {
            return ret;
        }
    }

    return 0;
}

/*
 * Fill in the device-tree node at @offset for one virtual CPU (the
 * thread-0 representative of a vcore): caches, frequencies, MMU/ISA
 * feature properties, and dynamic-reconfiguration (hotplug) index.
 */
static void spapr_dt_cpu(CPUState *cs, void *fdt, int offset,
                         SpaprMachineState *spapr)
{
    MachineState *ms = MACHINE(spapr);
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs);
    int index = spapr_get_vcpu_id(cpu);
    uint32_t segs[] =
{cpu_to_be32(28), cpu_to_be32(40), 65653018216SPaolo Bonzini 0xffffffff, 0xffffffff}; 65753018216SPaolo Bonzini uint32_t tbfreq = kvm_enabled() ? kvmppc_get_tbfreq() 65853018216SPaolo Bonzini : SPAPR_TIMEBASE_FREQ; 65953018216SPaolo Bonzini uint32_t cpufreq = kvm_enabled() ? kvmppc_get_clockfreq() : 1000000000; 66053018216SPaolo Bonzini uint32_t page_sizes_prop[64]; 66153018216SPaolo Bonzini size_t page_sizes_prop_size; 66253018216SPaolo Bonzini unsigned int smp_threads = ms->smp.threads; 66353018216SPaolo Bonzini uint32_t vcpus_per_socket = smp_threads * ms->smp.cores; 66453018216SPaolo Bonzini uint32_t pft_size_prop[] = {0, cpu_to_be32(spapr->htab_shift)}; 66553018216SPaolo Bonzini int compat_smt = MIN(smp_threads, ppc_compat_max_vthreads(cpu)); 66653018216SPaolo Bonzini SpaprDrc *drc; 66753018216SPaolo Bonzini int drc_index; 66853018216SPaolo Bonzini uint32_t radix_AP_encodings[PPC_PAGE_SIZES_MAX_SZ]; 66953018216SPaolo Bonzini int i; 67053018216SPaolo Bonzini 67153018216SPaolo Bonzini drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU, index); 67253018216SPaolo Bonzini if (drc) { 67353018216SPaolo Bonzini drc_index = spapr_drc_index(drc); 67453018216SPaolo Bonzini _FDT((fdt_setprop_cell(fdt, offset, "ibm,my-drc-index", drc_index))); 6752a6593cbSAlexey Kardashevskiy } 6762a6593cbSAlexey Kardashevskiy 6772a6593cbSAlexey Kardashevskiy _FDT((fdt_setprop_cell(fdt, offset, "reg", index))); 6782a6593cbSAlexey Kardashevskiy _FDT((fdt_setprop_string(fdt, offset, "device_type", "cpu"))); 6792a6593cbSAlexey Kardashevskiy 6802a6593cbSAlexey Kardashevskiy _FDT((fdt_setprop_cell(fdt, offset, "cpu-version", env->spr[SPR_PVR]))); 6812a6593cbSAlexey Kardashevskiy _FDT((fdt_setprop_cell(fdt, offset, "d-cache-block-size", 6822a6593cbSAlexey Kardashevskiy env->dcache_line_size))); 6832a6593cbSAlexey Kardashevskiy _FDT((fdt_setprop_cell(fdt, offset, "d-cache-line-size", 6842a6593cbSAlexey Kardashevskiy env->dcache_line_size))); 6852a6593cbSAlexey Kardashevskiy _FDT((fdt_setprop_cell(fdt, 
offset, "i-cache-block-size", 6862a6593cbSAlexey Kardashevskiy env->icache_line_size))); 6872a6593cbSAlexey Kardashevskiy _FDT((fdt_setprop_cell(fdt, offset, "i-cache-line-size", 6882a6593cbSAlexey Kardashevskiy env->icache_line_size))); 6892a6593cbSAlexey Kardashevskiy 6902a6593cbSAlexey Kardashevskiy if (pcc->l1_dcache_size) { 6912a6593cbSAlexey Kardashevskiy _FDT((fdt_setprop_cell(fdt, offset, "d-cache-size", 6922a6593cbSAlexey Kardashevskiy pcc->l1_dcache_size))); 6932a6593cbSAlexey Kardashevskiy } else { 6942a6593cbSAlexey Kardashevskiy warn_report("Unknown L1 dcache size for cpu"); 6952a6593cbSAlexey Kardashevskiy } 6962a6593cbSAlexey Kardashevskiy if (pcc->l1_icache_size) { 6972a6593cbSAlexey Kardashevskiy _FDT((fdt_setprop_cell(fdt, offset, "i-cache-size", 6982a6593cbSAlexey Kardashevskiy pcc->l1_icache_size))); 6992a6593cbSAlexey Kardashevskiy } else { 7002a6593cbSAlexey Kardashevskiy warn_report("Unknown L1 icache size for cpu"); 7012a6593cbSAlexey Kardashevskiy } 7022a6593cbSAlexey Kardashevskiy 7032a6593cbSAlexey Kardashevskiy _FDT((fdt_setprop_cell(fdt, offset, "timebase-frequency", tbfreq))); 7042a6593cbSAlexey Kardashevskiy _FDT((fdt_setprop_cell(fdt, offset, "clock-frequency", cpufreq))); 7052a6593cbSAlexey Kardashevskiy _FDT((fdt_setprop_cell(fdt, offset, "slb-size", cpu->hash64_opts->slb_size))); 7062a6593cbSAlexey Kardashevskiy _FDT((fdt_setprop_cell(fdt, offset, "ibm,slb-size", cpu->hash64_opts->slb_size))); 7072a6593cbSAlexey Kardashevskiy _FDT((fdt_setprop_string(fdt, offset, "status", "okay"))); 7082a6593cbSAlexey Kardashevskiy _FDT((fdt_setprop(fdt, offset, "64-bit", NULL, 0))); 7092a6593cbSAlexey Kardashevskiy 71003282a3aSLucas Mateus Castro (alqotel) if (ppc_has_spr(cpu, SPR_PURR)) { 71153018216SPaolo Bonzini _FDT((fdt_setprop_cell(fdt, offset, "ibm,purr", 1))); 71253018216SPaolo Bonzini } 71303282a3aSLucas Mateus Castro (alqotel) if (ppc_has_spr(cpu, SPR_PURR)) { 71453018216SPaolo Bonzini _FDT((fdt_setprop_cell(fdt, offset, "ibm,spurr", 
1))); 71553018216SPaolo Bonzini } 71653018216SPaolo Bonzini 71753018216SPaolo Bonzini if (ppc_hash64_has(cpu, PPC_HASH64_1TSEG)) { 71853018216SPaolo Bonzini _FDT((fdt_setprop(fdt, offset, "ibm,processor-segment-sizes", 71953018216SPaolo Bonzini segs, sizeof(segs)))); 72053018216SPaolo Bonzini } 72153018216SPaolo Bonzini 72253018216SPaolo Bonzini /* Advertise VSX (vector extensions) if available 72353018216SPaolo Bonzini * 1 == VMX / Altivec available 72453018216SPaolo Bonzini * 2 == VSX available 72553018216SPaolo Bonzini * 72653018216SPaolo Bonzini * Only CPUs for which we create core types in spapr_cpu_core.c 72753018216SPaolo Bonzini * are possible, and all of those have VMX */ 7282460e1d7SCédric Le Goater if (env->insns_flags & PPC_ALTIVEC) { 72953018216SPaolo Bonzini if (spapr_get_cap(spapr, SPAPR_CAP_VSX) != 0) { 73053018216SPaolo Bonzini _FDT((fdt_setprop_cell(fdt, offset, "ibm,vmx", 2))); 73153018216SPaolo Bonzini } else { 73253018216SPaolo Bonzini _FDT((fdt_setprop_cell(fdt, offset, "ibm,vmx", 1))); 73353018216SPaolo Bonzini } 7342460e1d7SCédric Le Goater } 73553018216SPaolo Bonzini 73653018216SPaolo Bonzini /* Advertise DFP (Decimal Floating Point) if available 73728e02042SDavid Gibson * 0 / no property == no DFP 73853018216SPaolo Bonzini * 1 == DFP available */ 739fb164994SDavid Gibson if (spapr_get_cap(spapr, SPAPR_CAP_DFP) != 0) { 7407db8a127SAlexey Kardashevskiy _FDT((fdt_setprop_cell(fdt, offset, "ibm,dfp", 1))); 7417db8a127SAlexey Kardashevskiy } 7427db8a127SAlexey Kardashevskiy 7437db8a127SAlexey Kardashevskiy page_sizes_prop_size = ppc_create_page_sizes_prop(cpu, page_sizes_prop, 74453018216SPaolo Bonzini sizeof(page_sizes_prop)); 7457db8a127SAlexey Kardashevskiy if (page_sizes_prop_size) { 7467db8a127SAlexey Kardashevskiy _FDT((fdt_setprop(fdt, offset, "ibm,segment-page-sizes", 7477db8a127SAlexey Kardashevskiy page_sizes_prop, page_sizes_prop_size))); 748fb164994SDavid Gibson } 7497db8a127SAlexey Kardashevskiy 75091335a5eSDavid Gibson 
spapr_dt_pa_features(spapr, cpu, fdt, offset); 75153018216SPaolo Bonzini 7527db8a127SAlexey Kardashevskiy _FDT((fdt_setprop_cell(fdt, offset, "ibm,chip-id", 7537db8a127SAlexey Kardashevskiy cs->cpu_index / vcpus_per_socket))); 7547db8a127SAlexey Kardashevskiy 75553018216SPaolo Bonzini _FDT((fdt_setprop(fdt, offset, "ibm,pft-size", 756fb164994SDavid Gibson pft_size_prop, sizeof(pft_size_prop)))); 7575fe269b1SPaul Mackerras 7585fe269b1SPaul Mackerras if (ms->numa_state->num_nodes > 1) { 7598f86a408SDaniel Henrique Barboza _FDT(spapr_numa_fixup_cpu_dt(spapr, fdt, offset, cpu)); 7605fe269b1SPaul Mackerras } 7615fe269b1SPaul Mackerras 7627db8a127SAlexey Kardashevskiy _FDT(spapr_fixup_cpu_smt_dt(fdt, offset, cpu, compat_smt)); 7637db8a127SAlexey Kardashevskiy 7647db8a127SAlexey Kardashevskiy if (pcc->radix_page_info) { 7657db8a127SAlexey Kardashevskiy for (i = 0; i < pcc->radix_page_info->count; i++) { 7667db8a127SAlexey Kardashevskiy radix_AP_encodings[i] = 7677db8a127SAlexey Kardashevskiy cpu_to_be32(pcc->radix_page_info->entries[i]); 7686010818cSAlexey Kardashevskiy } 7696010818cSAlexey Kardashevskiy _FDT((fdt_setprop(fdt, offset, "ibm,processor-radix-AP-encodings", 7706010818cSAlexey Kardashevskiy radix_AP_encodings, 7716010818cSAlexey Kardashevskiy pcc->radix_page_info->count * 7726010818cSAlexey Kardashevskiy sizeof(radix_AP_encodings[0])))); 7736010818cSAlexey Kardashevskiy } 7746010818cSAlexey Kardashevskiy 7756010818cSAlexey Kardashevskiy /* 7766010818cSAlexey Kardashevskiy * We set this property to let the guest know that it can use the large 7776010818cSAlexey Kardashevskiy * decrementer and its width in bits. 
7786010818cSAlexey Kardashevskiy */ 7796010818cSAlexey Kardashevskiy if (spapr_get_cap(spapr, SPAPR_CAP_LARGE_DECREMENTER) != SPAPR_CAP_OFF) 78053018216SPaolo Bonzini _FDT((fdt_setprop_u32(fdt, offset, "ibm,dec-bits", 78153018216SPaolo Bonzini pcc->lrg_decr_bits))); 78253018216SPaolo Bonzini } 78353018216SPaolo Bonzini 78491335a5eSDavid Gibson static void spapr_dt_cpus(void *fdt, SpaprMachineState *spapr) 78553018216SPaolo Bonzini { 78653018216SPaolo Bonzini CPUState **rev; 78753018216SPaolo Bonzini CPUState *cs; 78853018216SPaolo Bonzini int n_cpus; 78953018216SPaolo Bonzini int cpus_offset; 79053018216SPaolo Bonzini int i; 79153018216SPaolo Bonzini 79253018216SPaolo Bonzini cpus_offset = fdt_add_subnode(fdt, 0, "cpus"); 79353018216SPaolo Bonzini _FDT(cpus_offset); 79453018216SPaolo Bonzini _FDT((fdt_setprop_cell(fdt, cpus_offset, "#address-cells", 0x1))); 79553018216SPaolo Bonzini _FDT((fdt_setprop_cell(fdt, cpus_offset, "#size-cells", 0x0))); 79653018216SPaolo Bonzini 79753018216SPaolo Bonzini /* 79853018216SPaolo Bonzini * We walk the CPUs in reverse order to ensure that CPU DT nodes 79953018216SPaolo Bonzini * created by fdt_add_subnode() end up in the right order in FDT 80053018216SPaolo Bonzini * for the guest kernel the enumerate the CPUs correctly. 80153018216SPaolo Bonzini * 80253018216SPaolo Bonzini * The CPU list cannot be traversed in reverse order, so we need 80353018216SPaolo Bonzini * to do extra work. 
     */
    n_cpus = 0;
    rev = NULL;
    /* Collect all CPUs into a growable array so we can iterate backwards */
    CPU_FOREACH(cs) {
        rev = g_renew(CPUState *, rev, n_cpus + 1);
        rev[n_cpus++] = cs;
    }

    for (i = n_cpus - 1; i >= 0; i--) {
        CPUState *cs = rev[i];
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        int index = spapr_get_vcpu_id(cpu);
        DeviceClass *dc = DEVICE_GET_CLASS(cs);
        g_autofree char *nodename = NULL;
        int offset;

        /* Only thread 0 of each virtual core gets a DT node */
        if (!spapr_is_thread0_in_vcore(spapr, cpu)) {
            continue;
        }

        nodename = g_strdup_printf("%s@%x", dc->fw_name, index);
        offset = fdt_add_subnode(fdt, cpus_offset, nodename);
        _FDT(offset);
        spapr_dt_cpu(cs, fdt, offset, spapr);
    }

    g_free(rev);
}

/*
 * Add the /ibm,platform-facilities node with an ibm,random-v1 child,
 * advertising the hypervisor RNG facility (H_RANDOM) to the guest.
 * Returns 0 on success, -1 on any libfdt failure.
 */
static int spapr_dt_rng(void *fdt)
{
    int node;
    int ret;

    node = qemu_fdt_add_subnode(fdt, "/ibm,platform-facilities");
    if (node <= 0) {
        return -1;
    }
    /* Errors from the individual setprops are OR-ed and checked once */
    ret = fdt_setprop_string(fdt, node, "device_type",
                             "ibm,platform-facilities");
    ret |= fdt_setprop_cell(fdt, node, "#address-cells", 0x1);
    ret |= fdt_setprop_cell(fdt, node, "#size-cells", 0x0);

    node = fdt_add_subnode(fdt, node, "ibm,random-v1");
    if (node <= 0) {
        return -1;
    }
    ret |= fdt_setprop_string(fdt, node, "compatible", "ibm,random");

    return ret ? -1 : 0;
}

/*
 * Build the /rtas node: hypertas feature strings, RTAS sizing, and
 * LRDR (logical resource dynamic reconfiguration) capacity.
 */
static void spapr_dt_rtas(SpaprMachineState *spapr, void *fdt)
{
    MachineState *ms = MACHINE(spapr);
    int rtas;
    GString *hypertas = g_string_sized_new(256);
    GString *qemu_hypertas = g_string_sized_new(256);
    /* Top of device memory bounds the hot-addable address space */
    uint64_t max_device_addr = MACHINE(spapr)->device_memory->base +
        memory_region_size(&MACHINE(spapr)->device_memory->mr);
    uint32_t lrdr_capacity[] = {
        cpu_to_be32(max_device_addr >> 32),
        cpu_to_be32(max_device_addr & 0xffffffff),
        cpu_to_be32(SPAPR_MEMORY_BLOCK_SIZE >> 32),
        cpu_to_be32(SPAPR_MEMORY_BLOCK_SIZE & 0xffffffff),
        cpu_to_be32(ms->smp.max_cpus / ms->smp.threads),
    };

    _FDT(rtas = fdt_add_subnode(fdt, 0, "rtas"));

    /* hypertas */
    add_str(hypertas, "hcall-pft");
    add_str(hypertas, "hcall-term");
    add_str(hypertas, "hcall-dabr");
    add_str(hypertas, "hcall-interrupt");
    add_str(hypertas, "hcall-tce");
    add_str(hypertas, "hcall-vio");
    add_str(hypertas, "hcall-splpar");
    add_str(hypertas, "hcall-join");
    add_str(hypertas, "hcall-bulk");
    add_str(hypertas, "hcall-set-mode");
    add_str(hypertas, "hcall-sprg0");
    add_str(hypertas, "hcall-copy");
    add_str(hypertas, "hcall-debug");
    add_str(hypertas, "hcall-vphn");
    if (spapr_get_cap(spapr, SPAPR_CAP_RPT_INVALIDATE) == SPAPR_CAP_ON) {
        add_str(hypertas, "hcall-rpt-invalidate");
    }

    /* qemu,hypertas-functions lists QEMU-private extensions */
    add_str(qemu_hypertas, "hcall-memop1");

    /* multi-tce is always available under TCG; under KVM only when the
     * kernel supports it */
    if (!kvm_enabled() || kvmppc_spapr_use_multitce()) {
        add_str(hypertas, "hcall-multi-tce");
    }

    if (spapr->resize_hpt != SPAPR_RESIZE_HPT_DISABLED) {
        add_str(hypertas, "hcall-hpt-resize");
    }

    add_str(hypertas, "hcall-watchdog");

    _FDT(fdt_setprop(fdt, rtas, "ibm,hypertas-functions",
                     hypertas->str, hypertas->len));
    g_string_free(hypertas, TRUE);
    _FDT(fdt_setprop(fdt, rtas, "qemu,hypertas-functions",
                     qemu_hypertas->str, qemu_hypertas->len));
    g_string_free(qemu_hypertas, TRUE);

    spapr_numa_write_rtas_dt(spapr, fdt, rtas);

    /*
     * FWNMI reserves RTAS_ERROR_LOG_MAX for the machine check error log,
     * and 16 bytes per CPU for system reset error log plus an extra 8 bytes.
     *
     * The system reset requirements are driven by existing Linux and PowerVM
     * implementation which (contrary to PAPR) saves r3 in the error log
     * structure like machine check, so Linux expects to find the saved r3
     * value at the address in r3 upon FWNMI-enabled sreset interrupt (and
     * does not look at the error value).
     *
     * System reset interrupts are not subject to interlock like machine
     * check, so this memory area could be corrupted if the sreset is
     * interrupted by a machine check (or vice versa) if it was shared. To
     * prevent this, system reset uses per-CPU areas for the sreset save
     * area. A system reset that interrupts a system reset handler could
     * still overwrite this area, but Linux doesn't try to recover in that
     * case anyway.
     *
     * The extra 8 bytes is required because Linux's FWNMI error log check
     * is off-by-one.
     *
     * RTAS_MIN_SIZE is required for the RTAS blob itself.
     */
    _FDT(fdt_setprop_cell(fdt, rtas, "rtas-size", RTAS_MIN_SIZE +
                          RTAS_ERROR_LOG_MAX +
                          ms->smp.max_cpus * sizeof(uint64_t) * 2 +
                          sizeof(uint64_t)));
    _FDT(fdt_setprop_cell(fdt, rtas, "rtas-error-log-max",
                          RTAS_ERROR_LOG_MAX));
    _FDT(fdt_setprop_cell(fdt, rtas, "rtas-event-scan-rate",
                          RTAS_EVENT_SCAN_RATE));

    g_assert(msi_nonbroken);
    _FDT(fdt_setprop(fdt, rtas, "ibm,change-msix-capable", NULL, 0));

    /*
     * According to PAPR, rtas ibm,os-term does not guarantee a return
     * back to the guest cpu.
     *
     * While an additional ibm,extended-os-term property indicates
     * that rtas call return will always occur. Set this property.
     */
    _FDT(fdt_setprop(fdt, rtas, "ibm,extended-os-term", NULL, 0));

    _FDT(fdt_setprop(fdt, rtas, "ibm,lrdr-capacity",
                     lrdr_capacity, sizeof(lrdr_capacity)));

    spapr_dt_rtas_tokens(fdt, rtas);
}

/*
 * Prepare ibm,arch-vec-5-platform-support, which indicates the MMU
 * and the XIVE features that the guest may request and thus the valid
 * values for bytes 23..26 of option vector 5:
 */
static void spapr_dt_ov5_platform_support(SpaprMachineState *spapr, void *fdt,
                                          int chosen)
{
    PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu);

    /* val[] is a flat list of (byte-number, allowed-values) pairs */
    char val[2 * 4] = {
        23, 0x00, /* XICS / XIVE mode */
        24, 0x00, /* Hash/Radix, filled in below. */
        25, 0x00, /* Hash options: Segment Tables == no, GTSE == no. */
        26, 0x40, /* Radix options: GTSE == yes. */
    };

    /* Byte 23: which interrupt controller modes this machine can offer */
    if (spapr->irq->xics && spapr->irq->xive) {
        val[1] = SPAPR_OV5_XIVE_BOTH;
    } else if (spapr->irq->xive) {
        val[1] = SPAPR_OV5_XIVE_EXPLOIT;
    } else {
        assert(spapr->irq->xics);
        val[1] = SPAPR_OV5_XIVE_LEGACY;
    }

    if (!ppc_check_compat(first_ppc_cpu, CPU_POWERPC_LOGICAL_3_00, 0,
                          first_ppc_cpu->compat_pvr)) {
        /*
         * If we're in a pre POWER9 compat mode then the guest should
         * do hash and use the legacy interrupt mode
         */
        val[1] = SPAPR_OV5_XIVE_LEGACY; /* XICS */
        val[3] = 0x00; /* Hash */
        spapr_check_mmu_mode(false);
    } else if (kvm_enabled()) {
        /* Byte 24: MMU modes, limited by what the host kernel supports */
        if (kvmppc_has_cap_mmu_radix() && kvmppc_has_cap_mmu_hash_v3()) {
            val[3] = 0x80; /* OV5_MMU_BOTH */
        } else if (kvmppc_has_cap_mmu_radix()) {
            val[3] = 0x40; /* OV5_MMU_RADIX_300 */
        } else {
            val[3] = 0x00; /* Hash */
        }
    } else {
        /* V3 MMU supports both hash and radix in tcg (with dynamic switching) */
        val[3] = 0xC0;
    }
    _FDT(fdt_setprop(fdt, chosen, "ibm,arch-vec-5-platform-support",
                     val, sizeof(val)));
}

static void
spapr_dt_chosen(SpaprMachineState *spapr, void *fdt, bool reset)
{
    /*
     * Build the /chosen node: boot parameters (kernel cmdline, initrd,
     * boot list, stdout path) on reset, plus an rng-seed and the
     * CAS-negotiated option vector 5 on every (re)build.
     */
    MachineState *machine = MACHINE(spapr);
    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);
    uint8_t rng_seed[32];
    int chosen;

    _FDT(chosen = fdt_add_subnode(fdt, 0, "chosen"));

    /* Boot-time-only properties; skipped on CAS rebuild (reset == false) */
    if (reset) {
        const char *boot_device = spapr->boot_device;
        g_autofree char *stdout_path = spapr_vio_stdout_path(spapr->vio_bus);
        size_t cb = 0;
        g_autofree char *bootlist = get_boot_devices_list(&cb);

        if (machine->kernel_cmdline && machine->kernel_cmdline[0]) {
            _FDT(fdt_setprop_string(fdt, chosen, "bootargs",
                                    machine->kernel_cmdline));
        }

        if (spapr->initrd_size) {
            _FDT(fdt_setprop_cell(fdt, chosen, "linux,initrd-start",
                                  spapr->initrd_base));
            _FDT(fdt_setprop_cell(fdt, chosen, "linux,initrd-end",
                                  spapr->initrd_base + spapr->initrd_size));
        }

        if (spapr->kernel_size) {
            /* Kernel load address and size as big-endian 64-bit pair */
            uint64_t kprop[2] = { cpu_to_be64(spapr->kernel_addr),
                                  cpu_to_be64(spapr->kernel_size) };

            _FDT(fdt_setprop(fdt, chosen, "qemu,boot-kernel",
                             &kprop, sizeof(kprop)));
            if (spapr->kernel_le) {
                _FDT(fdt_setprop(fdt, chosen, "qemu,boot-kernel-le", NULL, 0));
            }
        }
        if (machine->boot_config.has_menu && machine->boot_config.menu) {
            _FDT((fdt_setprop_cell(fdt, chosen, "qemu,boot-menu", true)));
        }
        _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-width", graphic_width));
        _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-height", graphic_height));
        _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-depth", graphic_depth));

        if (cb && bootlist) {
            int i;

            /* Boot list is newline-separated; SLOF wants spaces */
            for (i = 0; i < cb; i++) {
                if (bootlist[i] == '\n') {
                    bootlist[i] = ' ';
                }
            }
            _FDT(fdt_setprop_string(fdt, chosen, "qemu,boot-list", bootlist));
        }

        if (boot_device && strlen(boot_device)) {
            _FDT(fdt_setprop_string(fdt, chosen, "qemu,boot-device", boot_device));
        }

        if (spapr->want_stdout_path && stdout_path) {
            /*
             * "linux,stdout-path" and "stdout" properties are
             * deprecated by linux kernel. New platforms should only
             * use the "stdout-path" property. Set the new property
             * and continue using older property to remain compatible
             * with the existing firmware.
             */
            _FDT(fdt_setprop_string(fdt, chosen, "linux,stdout-path", stdout_path));
            _FDT(fdt_setprop_string(fdt, chosen, "stdout-path", stdout_path));
        }

        /*
         * We can deal with BAR reallocation just fine, advertise it
         * to the guest
         */
        if (smc->linux_pci_probe) {
            _FDT(fdt_setprop_cell(fdt, chosen, "linux,pci-probe-only", 0));
        }

        spapr_dt_ov5_platform_support(spapr, fdt, chosen);
    }

    /* Fresh seed on every build so the guest's RNG is never replayed */
    qemu_guest_getrandom_nofail(rng_seed, sizeof(rng_seed));
    _FDT(fdt_setprop(fdt, chosen, "rng-seed", rng_seed, sizeof(rng_seed)));

    _FDT(spapr_dt_ovec(fdt, chosen, spapr->ov5_cas, "ibm,architecture-vec-5"));
}

static void spapr_dt_hypervisor(SpaprMachineState *spapr, void *fdt)
{
    /* The /hypervisor node isn't in PAPR - this is a hack to allow PR
     * KVM to work under pHyp with some guest co-operation */
    int hypervisor;
    uint8_t hypercall[16];

    _FDT(hypervisor = fdt_add_subnode(fdt, 0, "hypervisor"));
    /* indicate KVM hypercall interface */
    _FDT(fdt_setprop_string(fdt, hypervisor, "compatible", "linux,kvm"));
    if (kvmppc_has_cap_fixup_hcalls()) {
        /*
         * Older KVM versions with older guest kernels were broken
         * with the magic page, don't allow the guest to map it.
1117fca5f2dcSDavid Gibson */ 1118fca5f2dcSDavid Gibson if (!kvmppc_get_hypercall(first_cpu->env_ptr, hypercall, 1119fca5f2dcSDavid Gibson sizeof(hypercall))) { 1120fca5f2dcSDavid Gibson _FDT(fdt_setprop(fdt, hypervisor, "hcall-instructions", 1121fca5f2dcSDavid Gibson hypercall, sizeof(hypercall))); 1122fca5f2dcSDavid Gibson } 1123fca5f2dcSDavid Gibson } 1124fca5f2dcSDavid Gibson } 1125fca5f2dcSDavid Gibson 11260c21e073SDavid Gibson void *spapr_build_fdt(SpaprMachineState *spapr, bool reset, size_t space) 112753018216SPaolo Bonzini { 1128c86c1affSDaniel Henrique Barboza MachineState *machine = MACHINE(spapr); 11293c0c47e3SDavid Gibson MachineClass *mc = MACHINE_GET_CLASS(machine); 1130ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine); 1131776e887fSGreg Kurz uint32_t root_drc_type_mask = 0; 11327c866c6aSDavid Gibson int ret; 113353018216SPaolo Bonzini void *fdt; 1134ce2918cbSDavid Gibson SpaprPhbState *phb; 1135398a0bd5SDavid Gibson char *buf; 113653018216SPaolo Bonzini 113797b32a6aSDavid Gibson fdt = g_malloc0(space); 113897b32a6aSDavid Gibson _FDT((fdt_create_empty_tree(fdt, space))); 113953018216SPaolo Bonzini 1140398a0bd5SDavid Gibson /* Root node */ 1141398a0bd5SDavid Gibson _FDT(fdt_setprop_string(fdt, 0, "device_type", "chrp")); 1142398a0bd5SDavid Gibson _FDT(fdt_setprop_string(fdt, 0, "model", "IBM pSeries (emulated by qemu)")); 1143398a0bd5SDavid Gibson _FDT(fdt_setprop_string(fdt, 0, "compatible", "qemu,pseries")); 1144398a0bd5SDavid Gibson 11450a794529SDavid Gibson /* Guest UUID & Name*/ 1146398a0bd5SDavid Gibson buf = qemu_uuid_unparse_strdup(&qemu_uuid); 1147398a0bd5SDavid Gibson _FDT(fdt_setprop_string(fdt, 0, "vm,uuid", buf)); 1148398a0bd5SDavid Gibson if (qemu_uuid_set) { 1149398a0bd5SDavid Gibson _FDT(fdt_setprop_string(fdt, 0, "system-id", buf)); 1150398a0bd5SDavid Gibson } 1151398a0bd5SDavid Gibson g_free(buf); 1152398a0bd5SDavid Gibson 1153398a0bd5SDavid Gibson if (qemu_get_vm_name()) { 1154398a0bd5SDavid Gibson 
_FDT(fdt_setprop_string(fdt, 0, "ibm,partition-name", 1155398a0bd5SDavid Gibson qemu_get_vm_name())); 1156398a0bd5SDavid Gibson } 1157398a0bd5SDavid Gibson 11580a794529SDavid Gibson /* Host Model & Serial Number */ 11590a794529SDavid Gibson if (spapr->host_model) { 11600a794529SDavid Gibson _FDT(fdt_setprop_string(fdt, 0, "host-model", spapr->host_model)); 11610a794529SDavid Gibson } else if (smc->broken_host_serial_model && kvmppc_get_host_model(&buf)) { 11620a794529SDavid Gibson _FDT(fdt_setprop_string(fdt, 0, "host-model", buf)); 11630a794529SDavid Gibson g_free(buf); 11640a794529SDavid Gibson } 11650a794529SDavid Gibson 11660a794529SDavid Gibson if (spapr->host_serial) { 11670a794529SDavid Gibson _FDT(fdt_setprop_string(fdt, 0, "host-serial", spapr->host_serial)); 11680a794529SDavid Gibson } else if (smc->broken_host_serial_model && kvmppc_get_host_serial(&buf)) { 11690a794529SDavid Gibson _FDT(fdt_setprop_string(fdt, 0, "host-serial", buf)); 11700a794529SDavid Gibson g_free(buf); 11710a794529SDavid Gibson } 11720a794529SDavid Gibson 1173398a0bd5SDavid Gibson _FDT(fdt_setprop_cell(fdt, 0, "#address-cells", 2)); 1174398a0bd5SDavid Gibson _FDT(fdt_setprop_cell(fdt, 0, "#size-cells", 2)); 117553018216SPaolo Bonzini 1176fc7e0765SDavid Gibson /* /interrupt controller */ 117705289273SDavid Gibson spapr_irq_dt(spapr, spapr_max_server_number(spapr), fdt, PHANDLE_INTC); 1178fc7e0765SDavid Gibson 117991335a5eSDavid Gibson ret = spapr_dt_memory(spapr, fdt); 1180e8f986fcSBharata B Rao if (ret < 0) { 1181ce9863b7SCédric Le Goater error_report("couldn't setup memory nodes in fdt"); 1182e8f986fcSBharata B Rao exit(1); 118353018216SPaolo Bonzini } 118453018216SPaolo Bonzini 1185bf5a6696SDavid Gibson /* /vdevice */ 1186bf5a6696SDavid Gibson spapr_dt_vdevice(spapr->vio_bus, fdt); 118753018216SPaolo Bonzini 11884d9392beSThomas Huth if (object_resolve_path_type("", TYPE_SPAPR_RNG, NULL)) { 118991335a5eSDavid Gibson ret = spapr_dt_rng(fdt); 11904d9392beSThomas Huth if (ret < 0) { 
1191ce9863b7SCédric Le Goater error_report("could not set up rng device in the fdt"); 11924d9392beSThomas Huth exit(1); 11934d9392beSThomas Huth } 11944d9392beSThomas Huth } 11954d9392beSThomas Huth 119653018216SPaolo Bonzini QLIST_FOREACH(phb, &spapr->phbs, list) { 11978cbe71ecSDavid Gibson ret = spapr_dt_phb(spapr, phb, PHANDLE_INTC, fdt, NULL); 119853018216SPaolo Bonzini if (ret < 0) { 1199da34fed7SThomas Huth error_report("couldn't setup PCI devices in fdt"); 120053018216SPaolo Bonzini exit(1); 120153018216SPaolo Bonzini } 1202da34fed7SThomas Huth } 120353018216SPaolo Bonzini 120491335a5eSDavid Gibson spapr_dt_cpus(fdt, spapr); 120553018216SPaolo Bonzini 1206776e887fSGreg Kurz /* ibm,drc-indexes and friends */ 1207c20d332aSBharata B Rao if (smc->dr_lmb_enabled) { 1208776e887fSGreg Kurz root_drc_type_mask |= SPAPR_DR_CONNECTOR_TYPE_LMB; 1209776e887fSGreg Kurz } 1210776e887fSGreg Kurz if (smc->dr_phb_enabled) { 1211776e887fSGreg Kurz root_drc_type_mask |= SPAPR_DR_CONNECTOR_TYPE_PHB; 1212776e887fSGreg Kurz } 1213776e887fSGreg Kurz if (mc->nvdimm_supported) { 1214776e887fSGreg Kurz root_drc_type_mask |= SPAPR_DR_CONNECTOR_TYPE_PMEM; 1215776e887fSGreg Kurz } 1216776e887fSGreg Kurz if (root_drc_type_mask) { 1217776e887fSGreg Kurz _FDT(spapr_dt_drc(fdt, 0, NULL, root_drc_type_mask)); 1218c20d332aSBharata B Rao } 1219c20d332aSBharata B Rao 1220c5514d0eSIgor Mammedov if (mc->has_hotpluggable_cpus) { 1221af81cf32SBharata B Rao int offset = fdt_path_offset(fdt, "/cpus"); 12229e7d38e8SDavid Gibson ret = spapr_dt_drc(fdt, offset, NULL, SPAPR_DR_CONNECTOR_TYPE_CPU); 1223af81cf32SBharata B Rao if (ret < 0) { 1224af81cf32SBharata B Rao error_report("Couldn't set up CPU DR device tree properties"); 1225af81cf32SBharata B Rao exit(1); 1226af81cf32SBharata B Rao } 1227af81cf32SBharata B Rao } 1228af81cf32SBharata B Rao 1229ffb1e275SDavid Gibson /* /event-sources */ 1230ffbb1705SMichael Roth spapr_dt_events(spapr, fdt); 1231ffb1e275SDavid Gibson 12323f5dabceSDavid Gibson /* /rtas 
*/ 12333f5dabceSDavid Gibson spapr_dt_rtas(spapr, fdt); 12343f5dabceSDavid Gibson 12357c866c6aSDavid Gibson /* /chosen */ 12361e0e1108SDavid Gibson spapr_dt_chosen(spapr, fdt, reset); 1237cf6e5223SDavid Gibson 1238fca5f2dcSDavid Gibson /* /hypervisor */ 1239fca5f2dcSDavid Gibson if (kvm_enabled()) { 1240fca5f2dcSDavid Gibson spapr_dt_hypervisor(spapr, fdt); 1241fca5f2dcSDavid Gibson } 1242fca5f2dcSDavid Gibson 1243cf6e5223SDavid Gibson /* Build memory reserve map */ 1244a49f62b9SAlexey Kardashevskiy if (reset) { 1245cf6e5223SDavid Gibson if (spapr->kernel_size) { 124687262806SAlexey Kardashevskiy _FDT((fdt_add_mem_rsv(fdt, spapr->kernel_addr, 124787262806SAlexey Kardashevskiy spapr->kernel_size))); 1248cf6e5223SDavid Gibson } 1249cf6e5223SDavid Gibson if (spapr->initrd_size) { 1250a49f62b9SAlexey Kardashevskiy _FDT((fdt_add_mem_rsv(fdt, spapr->initrd_base, 1251a49f62b9SAlexey Kardashevskiy spapr->initrd_size))); 1252a49f62b9SAlexey Kardashevskiy } 1253cf6e5223SDavid Gibson } 1254cf6e5223SDavid Gibson 1255ee3a71e3SShivaprasad G Bhat /* NVDIMM devices */ 1256ee3a71e3SShivaprasad G Bhat if (mc->nvdimm_supported) { 1257f1aa45ffSDaniel Henrique Barboza spapr_dt_persistent_memory(spapr, fdt); 1258ee3a71e3SShivaprasad G Bhat } 1259ee3a71e3SShivaprasad G Bhat 1260997b6cfcSDavid Gibson return fdt; 126153018216SPaolo Bonzini } 126253018216SPaolo Bonzini 126353018216SPaolo Bonzini static uint64_t translate_kernel_address(void *opaque, uint64_t addr) 126453018216SPaolo Bonzini { 126587262806SAlexey Kardashevskiy SpaprMachineState *spapr = opaque; 126687262806SAlexey Kardashevskiy 126787262806SAlexey Kardashevskiy return (addr & 0x0fffffff) + spapr->kernel_addr; 126853018216SPaolo Bonzini } 126953018216SPaolo Bonzini 12701d1be34dSDavid Gibson static void emulate_spapr_hypercall(PPCVirtualHypervisor *vhyp, 12711d1be34dSDavid Gibson PowerPCCPU *cpu) 127253018216SPaolo Bonzini { 127353018216SPaolo Bonzini CPUPPCState *env = &cpu->env; 127453018216SPaolo Bonzini 12758d04fb55SJan 
Kiszka /* The TCG path should also be holding the BQL at this point */ 12768d04fb55SJan Kiszka g_assert(qemu_mutex_iothread_locked()); 12778d04fb55SJan Kiszka 1278120f738aSNicholas Piggin g_assert(!vhyp_cpu_in_nested(cpu)); 1279120f738aSNicholas Piggin 1280d41ccf6eSVíctor Colombo if (FIELD_EX64(env->msr, MSR, PR)) { 128153018216SPaolo Bonzini hcall_dprintf("Hypercall made with MSR[PR]=1\n"); 128253018216SPaolo Bonzini env->gpr[3] = H_PRIVILEGE; 128353018216SPaolo Bonzini } else { 128453018216SPaolo Bonzini env->gpr[3] = spapr_hypercall(cpu, env->gpr[3], &env->gpr[4]); 128553018216SPaolo Bonzini } 128653018216SPaolo Bonzini } 128753018216SPaolo Bonzini 128800fd075eSBenjamin Herrenschmidt struct LPCRSyncState { 128900fd075eSBenjamin Herrenschmidt target_ulong value; 129000fd075eSBenjamin Herrenschmidt target_ulong mask; 129100fd075eSBenjamin Herrenschmidt }; 129200fd075eSBenjamin Herrenschmidt 129300fd075eSBenjamin Herrenschmidt static void do_lpcr_sync(CPUState *cs, run_on_cpu_data arg) 129400fd075eSBenjamin Herrenschmidt { 129500fd075eSBenjamin Herrenschmidt struct LPCRSyncState *s = arg.host_ptr; 129600fd075eSBenjamin Herrenschmidt PowerPCCPU *cpu = POWERPC_CPU(cs); 129700fd075eSBenjamin Herrenschmidt CPUPPCState *env = &cpu->env; 129800fd075eSBenjamin Herrenschmidt target_ulong lpcr; 129900fd075eSBenjamin Herrenschmidt 130000fd075eSBenjamin Herrenschmidt cpu_synchronize_state(cs); 130100fd075eSBenjamin Herrenschmidt lpcr = env->spr[SPR_LPCR]; 130200fd075eSBenjamin Herrenschmidt lpcr &= ~s->mask; 130300fd075eSBenjamin Herrenschmidt lpcr |= s->value; 130400fd075eSBenjamin Herrenschmidt ppc_store_lpcr(cpu, lpcr); 130500fd075eSBenjamin Herrenschmidt } 130600fd075eSBenjamin Herrenschmidt 130700fd075eSBenjamin Herrenschmidt void spapr_set_all_lpcrs(target_ulong value, target_ulong mask) 130800fd075eSBenjamin Herrenschmidt { 130900fd075eSBenjamin Herrenschmidt CPUState *cs; 131000fd075eSBenjamin Herrenschmidt struct LPCRSyncState s = { 131100fd075eSBenjamin 
Herrenschmidt .value = value, 131200fd075eSBenjamin Herrenschmidt .mask = mask 131300fd075eSBenjamin Herrenschmidt }; 131400fd075eSBenjamin Herrenschmidt CPU_FOREACH(cs) { 131500fd075eSBenjamin Herrenschmidt run_on_cpu(cs, do_lpcr_sync, RUN_ON_CPU_HOST_PTR(&s)); 131600fd075eSBenjamin Herrenschmidt } 131700fd075eSBenjamin Herrenschmidt } 131800fd075eSBenjamin Herrenschmidt 1319f32d4ab4SNicholas Piggin static bool spapr_get_pate(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu, 1320f32d4ab4SNicholas Piggin target_ulong lpid, ppc_v3_pate_t *entry) 13219861bb3eSSuraj Jitindar Singh { 1322ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(vhyp); 1323120f738aSNicholas Piggin SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu); 13249861bb3eSSuraj Jitindar Singh 1325120f738aSNicholas Piggin if (!spapr_cpu->in_nested) { 1326f32d4ab4SNicholas Piggin assert(lpid == 0); 1327f32d4ab4SNicholas Piggin 132879825f4dSBenjamin Herrenschmidt /* Copy PATE1:GR into PATE0:HR */ 132979825f4dSBenjamin Herrenschmidt entry->dw0 = spapr->patb_entry & PATE0_HR; 133079825f4dSBenjamin Herrenschmidt entry->dw1 = spapr->patb_entry; 1331f32d4ab4SNicholas Piggin 1332120f738aSNicholas Piggin } else { 1333120f738aSNicholas Piggin uint64_t patb, pats; 1334120f738aSNicholas Piggin 1335120f738aSNicholas Piggin assert(lpid != 0); 1336120f738aSNicholas Piggin 1337120f738aSNicholas Piggin patb = spapr->nested_ptcr & PTCR_PATB; 1338120f738aSNicholas Piggin pats = spapr->nested_ptcr & PTCR_PATS; 1339120f738aSNicholas Piggin 13403c2e80adSLeandro Lupori /* Check if partition table is properly aligned */ 13413c2e80adSLeandro Lupori if (patb & MAKE_64BIT_MASK(0, pats + 12)) { 13423c2e80adSLeandro Lupori return false; 13433c2e80adSLeandro Lupori } 13443c2e80adSLeandro Lupori 1345120f738aSNicholas Piggin /* Calculate number of entries */ 1346120f738aSNicholas Piggin pats = 1ull << (pats + 12 - 4); 1347120f738aSNicholas Piggin if (pats <= lpid) { 1348120f738aSNicholas Piggin return false; 1349120f738aSNicholas 
Piggin } 1350120f738aSNicholas Piggin 1351120f738aSNicholas Piggin /* Grab entry */ 1352120f738aSNicholas Piggin patb += 16 * lpid; 1353120f738aSNicholas Piggin entry->dw0 = ldq_phys(CPU(cpu)->as, patb); 1354120f738aSNicholas Piggin entry->dw1 = ldq_phys(CPU(cpu)->as, patb + 8); 1355120f738aSNicholas Piggin } 1356120f738aSNicholas Piggin 1357f32d4ab4SNicholas Piggin return true; 13589861bb3eSSuraj Jitindar Singh } 13599861bb3eSSuraj Jitindar Singh 1360e6b8fd24SSamuel Mendoza-Jonas #define HPTE(_table, _i) (void *)(((uint64_t *)(_table)) + ((_i) * 2)) 1361e6b8fd24SSamuel Mendoza-Jonas #define HPTE_VALID(_hpte) (tswap64(*((uint64_t *)(_hpte))) & HPTE64_V_VALID) 1362e6b8fd24SSamuel Mendoza-Jonas #define HPTE_DIRTY(_hpte) (tswap64(*((uint64_t *)(_hpte))) & HPTE64_V_HPTE_DIRTY) 1363e6b8fd24SSamuel Mendoza-Jonas #define CLEAN_HPTE(_hpte) ((*(uint64_t *)(_hpte)) &= tswap64(~HPTE64_V_HPTE_DIRTY)) 1364e6b8fd24SSamuel Mendoza-Jonas #define DIRTY_HPTE(_hpte) ((*(uint64_t *)(_hpte)) |= tswap64(HPTE64_V_HPTE_DIRTY)) 1365e6b8fd24SSamuel Mendoza-Jonas 1366715c5407SDavid Gibson /* 1367715c5407SDavid Gibson * Get the fd to access the kernel htab, re-opening it if necessary 1368715c5407SDavid Gibson */ 1369ce2918cbSDavid Gibson static int get_htab_fd(SpaprMachineState *spapr) 1370715c5407SDavid Gibson { 137114b0d748SGreg Kurz Error *local_err = NULL; 137214b0d748SGreg Kurz 1373715c5407SDavid Gibson if (spapr->htab_fd >= 0) { 1374715c5407SDavid Gibson return spapr->htab_fd; 1375715c5407SDavid Gibson } 1376715c5407SDavid Gibson 137714b0d748SGreg Kurz spapr->htab_fd = kvmppc_get_htab_fd(false, 0, &local_err); 1378715c5407SDavid Gibson if (spapr->htab_fd < 0) { 137914b0d748SGreg Kurz error_report_err(local_err); 1380715c5407SDavid Gibson } 1381715c5407SDavid Gibson 1382715c5407SDavid Gibson return spapr->htab_fd; 1383715c5407SDavid Gibson } 1384715c5407SDavid Gibson 1385ce2918cbSDavid Gibson void close_htab_fd(SpaprMachineState *spapr) 1386715c5407SDavid Gibson { 1387715c5407SDavid 
Gibson if (spapr->htab_fd >= 0) { 1388715c5407SDavid Gibson close(spapr->htab_fd); 1389715c5407SDavid Gibson } 1390715c5407SDavid Gibson spapr->htab_fd = -1; 1391715c5407SDavid Gibson } 1392715c5407SDavid Gibson 1393e57ca75cSDavid Gibson static hwaddr spapr_hpt_mask(PPCVirtualHypervisor *vhyp) 1394e57ca75cSDavid Gibson { 1395ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(vhyp); 1396e57ca75cSDavid Gibson 1397e57ca75cSDavid Gibson return HTAB_SIZE(spapr) / HASH_PTEG_SIZE_64 - 1; 1398e57ca75cSDavid Gibson } 1399e57ca75cSDavid Gibson 14001ec26c75SGreg Kurz static target_ulong spapr_encode_hpt_for_kvm_pr(PPCVirtualHypervisor *vhyp) 14011ec26c75SGreg Kurz { 1402ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(vhyp); 14031ec26c75SGreg Kurz 14041ec26c75SGreg Kurz assert(kvm_enabled()); 14051ec26c75SGreg Kurz 14061ec26c75SGreg Kurz if (!spapr->htab) { 14071ec26c75SGreg Kurz return 0; 14081ec26c75SGreg Kurz } 14091ec26c75SGreg Kurz 14101ec26c75SGreg Kurz return (target_ulong)(uintptr_t)spapr->htab | (spapr->htab_shift - 18); 14111ec26c75SGreg Kurz } 14121ec26c75SGreg Kurz 1413e57ca75cSDavid Gibson static const ppc_hash_pte64_t *spapr_map_hptes(PPCVirtualHypervisor *vhyp, 1414e57ca75cSDavid Gibson hwaddr ptex, int n) 1415e57ca75cSDavid Gibson { 1416ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(vhyp); 1417e57ca75cSDavid Gibson hwaddr pte_offset = ptex * HASH_PTE_SIZE_64; 1418e57ca75cSDavid Gibson 1419e57ca75cSDavid Gibson if (!spapr->htab) { 1420e57ca75cSDavid Gibson /* 1421e57ca75cSDavid Gibson * HTAB is controlled by KVM. Fetch into temporary buffer 1422e57ca75cSDavid Gibson */ 1423e57ca75cSDavid Gibson ppc_hash_pte64_t *hptes = g_malloc(n * HASH_PTE_SIZE_64); 1424e57ca75cSDavid Gibson kvmppc_read_hptes(hptes, ptex, n); 1425e57ca75cSDavid Gibson return hptes; 1426e57ca75cSDavid Gibson } 1427e57ca75cSDavid Gibson 1428e57ca75cSDavid Gibson /* 1429e57ca75cSDavid Gibson * HTAB is controlled by QEMU. 
Just point to the internally 1430e57ca75cSDavid Gibson * accessible PTEG. 1431e57ca75cSDavid Gibson */ 1432e57ca75cSDavid Gibson return (const ppc_hash_pte64_t *)(spapr->htab + pte_offset); 1433e57ca75cSDavid Gibson } 1434e57ca75cSDavid Gibson 1435e57ca75cSDavid Gibson static void spapr_unmap_hptes(PPCVirtualHypervisor *vhyp, 1436e57ca75cSDavid Gibson const ppc_hash_pte64_t *hptes, 1437e57ca75cSDavid Gibson hwaddr ptex, int n) 1438e57ca75cSDavid Gibson { 1439ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(vhyp); 1440e57ca75cSDavid Gibson 1441e57ca75cSDavid Gibson if (!spapr->htab) { 1442e57ca75cSDavid Gibson g_free((void *)hptes); 1443e57ca75cSDavid Gibson } 1444e57ca75cSDavid Gibson 1445e57ca75cSDavid Gibson /* Nothing to do for qemu managed HPT */ 1446e57ca75cSDavid Gibson } 1447e57ca75cSDavid Gibson 1448a2dd4e83SBenjamin Herrenschmidt void spapr_store_hpte(PowerPCCPU *cpu, hwaddr ptex, 1449e57ca75cSDavid Gibson uint64_t pte0, uint64_t pte1) 1450e57ca75cSDavid Gibson { 1451a2dd4e83SBenjamin Herrenschmidt SpaprMachineState *spapr = SPAPR_MACHINE(cpu->vhyp); 1452e57ca75cSDavid Gibson hwaddr offset = ptex * HASH_PTE_SIZE_64; 1453e57ca75cSDavid Gibson 1454e57ca75cSDavid Gibson if (!spapr->htab) { 1455e57ca75cSDavid Gibson kvmppc_write_hpte(ptex, pte0, pte1); 1456e57ca75cSDavid Gibson } else { 14573054b0caSBenjamin Herrenschmidt if (pte0 & HPTE64_V_VALID) { 14587bf00dfbSLeandro Lupori stq_p(spapr->htab + offset + HPTE64_DW1, pte1); 14593054b0caSBenjamin Herrenschmidt /* 14603054b0caSBenjamin Herrenschmidt * When setting valid, we write PTE1 first. 
This ensures 14613054b0caSBenjamin Herrenschmidt * proper synchronization with the reading code in 14623054b0caSBenjamin Herrenschmidt * ppc_hash64_pteg_search() 14633054b0caSBenjamin Herrenschmidt */ 14643054b0caSBenjamin Herrenschmidt smp_wmb(); 14653054b0caSBenjamin Herrenschmidt stq_p(spapr->htab + offset, pte0); 14663054b0caSBenjamin Herrenschmidt } else { 14673054b0caSBenjamin Herrenschmidt stq_p(spapr->htab + offset, pte0); 14683054b0caSBenjamin Herrenschmidt /* 14693054b0caSBenjamin Herrenschmidt * When clearing it we set PTE0 first. This ensures proper 14703054b0caSBenjamin Herrenschmidt * synchronization with the reading code in 14713054b0caSBenjamin Herrenschmidt * ppc_hash64_pteg_search() 14723054b0caSBenjamin Herrenschmidt */ 14733054b0caSBenjamin Herrenschmidt smp_wmb(); 14747bf00dfbSLeandro Lupori stq_p(spapr->htab + offset + HPTE64_DW1, pte1); 14753054b0caSBenjamin Herrenschmidt } 1476e57ca75cSDavid Gibson } 1477e57ca75cSDavid Gibson } 1478e57ca75cSDavid Gibson 1479a2dd4e83SBenjamin Herrenschmidt static void spapr_hpte_set_c(PPCVirtualHypervisor *vhyp, hwaddr ptex, 1480a2dd4e83SBenjamin Herrenschmidt uint64_t pte1) 1481a2dd4e83SBenjamin Herrenschmidt { 14827bf00dfbSLeandro Lupori hwaddr offset = ptex * HASH_PTE_SIZE_64 + HPTE64_DW1_C; 1483a2dd4e83SBenjamin Herrenschmidt SpaprMachineState *spapr = SPAPR_MACHINE(vhyp); 1484a2dd4e83SBenjamin Herrenschmidt 1485a2dd4e83SBenjamin Herrenschmidt if (!spapr->htab) { 1486a2dd4e83SBenjamin Herrenschmidt /* There should always be a hash table when this is called */ 1487a2dd4e83SBenjamin Herrenschmidt error_report("spapr_hpte_set_c called with no hash table !"); 1488a2dd4e83SBenjamin Herrenschmidt return; 1489a2dd4e83SBenjamin Herrenschmidt } 1490a2dd4e83SBenjamin Herrenschmidt 1491a2dd4e83SBenjamin Herrenschmidt /* The HW performs a non-atomic byte update */ 1492a2dd4e83SBenjamin Herrenschmidt stb_p(spapr->htab + offset, (pte1 & 0xff) | 0x80); 1493a2dd4e83SBenjamin Herrenschmidt } 1494a2dd4e83SBenjamin 
Herrenschmidt 1495a2dd4e83SBenjamin Herrenschmidt static void spapr_hpte_set_r(PPCVirtualHypervisor *vhyp, hwaddr ptex, 1496a2dd4e83SBenjamin Herrenschmidt uint64_t pte1) 1497a2dd4e83SBenjamin Herrenschmidt { 14987bf00dfbSLeandro Lupori hwaddr offset = ptex * HASH_PTE_SIZE_64 + HPTE64_DW1_R; 1499a2dd4e83SBenjamin Herrenschmidt SpaprMachineState *spapr = SPAPR_MACHINE(vhyp); 1500a2dd4e83SBenjamin Herrenschmidt 1501a2dd4e83SBenjamin Herrenschmidt if (!spapr->htab) { 1502a2dd4e83SBenjamin Herrenschmidt /* There should always be a hash table when this is called */ 1503a2dd4e83SBenjamin Herrenschmidt error_report("spapr_hpte_set_r called with no hash table !"); 1504a2dd4e83SBenjamin Herrenschmidt return; 1505a2dd4e83SBenjamin Herrenschmidt } 1506a2dd4e83SBenjamin Herrenschmidt 1507a2dd4e83SBenjamin Herrenschmidt /* The HW performs a non-atomic byte update */ 1508a2dd4e83SBenjamin Herrenschmidt stb_p(spapr->htab + offset, ((pte1 >> 8) & 0xff) | 0x01); 1509a2dd4e83SBenjamin Herrenschmidt } 1510a2dd4e83SBenjamin Herrenschmidt 15110b0b8310SDavid Gibson int spapr_hpt_shift_for_ramsize(uint64_t ramsize) 15128dfe8e7fSDavid Gibson { 15138dfe8e7fSDavid Gibson int shift; 15148dfe8e7fSDavid Gibson 15158dfe8e7fSDavid Gibson /* We aim for a hash table of size 1/128 the size of RAM (rounded 15168dfe8e7fSDavid Gibson * up). 
The PAPR recommendation is actually 1/64 of RAM size, but 15178dfe8e7fSDavid Gibson * that's much more than is needed for Linux guests */ 15188dfe8e7fSDavid Gibson shift = ctz64(pow2ceil(ramsize)) - 7; 15198dfe8e7fSDavid Gibson shift = MAX(shift, 18); /* Minimum architected size */ 15208dfe8e7fSDavid Gibson shift = MIN(shift, 46); /* Maximum architected size */ 15218dfe8e7fSDavid Gibson return shift; 15228dfe8e7fSDavid Gibson } 15238dfe8e7fSDavid Gibson 1524ce2918cbSDavid Gibson void spapr_free_hpt(SpaprMachineState *spapr) 152506ec79e8SBharata B Rao { 1526cb5b5ab9SXuzhou Cheng qemu_vfree(spapr->htab); 152706ec79e8SBharata B Rao spapr->htab = NULL; 152806ec79e8SBharata B Rao spapr->htab_shift = 0; 152906ec79e8SBharata B Rao close_htab_fd(spapr); 153006ec79e8SBharata B Rao } 153106ec79e8SBharata B Rao 1532a4e3a7c0SGreg Kurz int spapr_reallocate_hpt(SpaprMachineState *spapr, int shift, Error **errp) 153353018216SPaolo Bonzini { 1534c3e051edSGreg Kurz ERRP_GUARD(); 1535c5f54f3eSDavid Gibson long rc; 153653018216SPaolo Bonzini 1537c5f54f3eSDavid Gibson /* Clean up any HPT info from a previous boot */ 153806ec79e8SBharata B Rao spapr_free_hpt(spapr); 153953018216SPaolo Bonzini 1540c5f54f3eSDavid Gibson rc = kvmppc_reset_htab(shift); 1541f0638a0bSFabiano Rosas 1542f0638a0bSFabiano Rosas if (rc == -EOPNOTSUPP) { 1543f0638a0bSFabiano Rosas error_setg(errp, "HPT not supported in nested guests"); 1544a4e3a7c0SGreg Kurz return -EOPNOTSUPP; 1545f0638a0bSFabiano Rosas } 1546f0638a0bSFabiano Rosas 1547c5f54f3eSDavid Gibson if (rc < 0) { 1548c5f54f3eSDavid Gibson /* kernel-side HPT needed, but couldn't allocate one */ 1549c3e051edSGreg Kurz error_setg_errno(errp, errno, "Failed to allocate KVM HPT of order %d", 1550c5f54f3eSDavid Gibson shift); 1551c3e051edSGreg Kurz error_append_hint(errp, "Try smaller maxmem?\n"); 1552a4e3a7c0SGreg Kurz return -errno; 1553c5f54f3eSDavid Gibson } else if (rc > 0) { 1554c5f54f3eSDavid Gibson /* kernel-side HPT allocated */ 1555c5f54f3eSDavid 
Gibson if (rc != shift) { 1556c5f54f3eSDavid Gibson error_setg(errp, 1557c3e051edSGreg Kurz "Requested order %d HPT, but kernel allocated order %ld", 1558c5f54f3eSDavid Gibson shift, rc); 1559c3e051edSGreg Kurz error_append_hint(errp, "Try smaller maxmem?\n"); 1560a4e3a7c0SGreg Kurz return -ENOSPC; 15617735fedaSBharata B Rao } 15627735fedaSBharata B Rao 156353018216SPaolo Bonzini spapr->htab_shift = shift; 1564c18ad9a5SDavid Gibson spapr->htab = NULL; 1565b817772aSBharata B Rao } else { 1566c5f54f3eSDavid Gibson /* kernel-side HPT not needed, allocate in userspace instead */ 1567c5f54f3eSDavid Gibson size_t size = 1ULL << shift; 1568c5f54f3eSDavid Gibson int i; 156901a57972SSamuel Mendoza-Jonas 1570c5f54f3eSDavid Gibson spapr->htab = qemu_memalign(size, size); 1571c5f54f3eSDavid Gibson memset(spapr->htab, 0, size); 1572c5f54f3eSDavid Gibson spapr->htab_shift = shift; 1573b817772aSBharata B Rao 1574c5f54f3eSDavid Gibson for (i = 0; i < size / HASH_PTE_SIZE_64; i++) { 1575c5f54f3eSDavid Gibson DIRTY_HPTE(HPTE(spapr->htab, i)); 15767735fedaSBharata B Rao } 157753018216SPaolo Bonzini } 1578ee4d9eccSSuraj Jitindar Singh /* We're setting up a hash table, so that means we're not radix */ 1579176dcceeSSuraj Jitindar Singh spapr->patb_entry = 0; 158000fd075eSBenjamin Herrenschmidt spapr_set_all_lpcrs(0, LPCR_HR | LPCR_UPRT); 1581a4e3a7c0SGreg Kurz return 0; 158253018216SPaolo Bonzini } 158353018216SPaolo Bonzini 15848897ea5aSDavid Gibson void spapr_setup_hpt(SpaprMachineState *spapr) 1585b4db5413SSuraj Jitindar Singh { 15862772cf6bSDavid Gibson int hpt_shift; 15872772cf6bSDavid Gibson 1588087820e3SGreg Kurz if (spapr->resize_hpt == SPAPR_RESIZE_HPT_DISABLED) { 15892772cf6bSDavid Gibson hpt_shift = spapr_hpt_shift_for_ramsize(MACHINE(spapr)->maxram_size); 15902772cf6bSDavid Gibson } else { 1591768a20f3SDavid Gibson uint64_t current_ram_size; 1592768a20f3SDavid Gibson 1593768a20f3SDavid Gibson current_ram_size = MACHINE(spapr)->ram_size + get_plugged_memory_size(); 
1594768a20f3SDavid Gibson hpt_shift = spapr_hpt_shift_for_ramsize(current_ram_size); 15952772cf6bSDavid Gibson } 15962772cf6bSDavid Gibson spapr_reallocate_hpt(spapr, hpt_shift, &error_fatal); 15972772cf6bSDavid Gibson 15988897ea5aSDavid Gibson if (kvm_enabled()) { 15996a84737cSDavid Gibson hwaddr vrma_limit = kvmppc_vrma_limit(spapr->htab_shift); 16006a84737cSDavid Gibson 16018897ea5aSDavid Gibson /* Check our RMA fits in the possible VRMA */ 16028897ea5aSDavid Gibson if (vrma_limit < spapr->rma_size) { 16038897ea5aSDavid Gibson error_report("Unable to create %" HWADDR_PRIu 16048897ea5aSDavid Gibson "MiB RMA (VRMA only allows %" HWADDR_PRIu "MiB", 16058897ea5aSDavid Gibson spapr->rma_size / MiB, vrma_limit / MiB); 16068897ea5aSDavid Gibson exit(EXIT_FAILURE); 16078897ea5aSDavid Gibson } 1608b4db5413SSuraj Jitindar Singh } 1609b4db5413SSuraj Jitindar Singh } 1610b4db5413SSuraj Jitindar Singh 1611068479e1SFabiano Rosas void spapr_check_mmu_mode(bool guest_radix) 1612068479e1SFabiano Rosas { 1613068479e1SFabiano Rosas if (guest_radix) { 1614068479e1SFabiano Rosas if (kvm_enabled() && !kvmppc_has_cap_mmu_radix()) { 1615068479e1SFabiano Rosas error_report("Guest requested unavailable MMU mode (radix)."); 1616068479e1SFabiano Rosas exit(EXIT_FAILURE); 1617068479e1SFabiano Rosas } 1618068479e1SFabiano Rosas } else { 1619068479e1SFabiano Rosas if (kvm_enabled() && kvmppc_has_cap_mmu_radix() 1620068479e1SFabiano Rosas && !kvmppc_has_cap_mmu_hash_v3()) { 1621068479e1SFabiano Rosas error_report("Guest requested unavailable MMU mode (hash)."); 1622068479e1SFabiano Rosas exit(EXIT_FAILURE); 1623068479e1SFabiano Rosas } 1624068479e1SFabiano Rosas } 1625068479e1SFabiano Rosas } 1626068479e1SFabiano Rosas 16277966d70fSJason A. 
Donenfeld static void spapr_machine_reset(MachineState *machine, ShutdownCause reason) 162853018216SPaolo Bonzini { 1629ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(machine); 1630182735efSAndreas Färber PowerPCCPU *first_ppc_cpu; 1631744a928cSAlexey Kardashevskiy hwaddr fdt_addr; 1632997b6cfcSDavid Gibson void *fdt; 1633997b6cfcSDavid Gibson int rc; 1634259186a7SAndreas Färber 16356c8ebe30SDavid Gibson pef_kvm_reset(machine->cgs, &error_fatal); 16369f6edd06SDavid Gibson spapr_caps_apply(spapr); 163733face6bSDavid Gibson 16381481fe5fSLaurent Vivier first_ppc_cpu = POWERPC_CPU(first_cpu); 16391481fe5fSLaurent Vivier if (kvm_enabled() && kvmppc_has_cap_mmu_radix() && 1640ad99d04cSDavid Gibson ppc_type_check_compat(machine->cpu_type, CPU_POWERPC_LOGICAL_3_00, 0, 16411481fe5fSLaurent Vivier spapr->max_compat_pvr)) { 164279825f4dSBenjamin Herrenschmidt /* 164379825f4dSBenjamin Herrenschmidt * If using KVM with radix mode available, VCPUs can be started 1644b4db5413SSuraj Jitindar Singh * without a HPT because KVM will start them in radix mode. 164579825f4dSBenjamin Herrenschmidt * Set the GR bit in PATE so that we know there is no HPT. 164679825f4dSBenjamin Herrenschmidt */ 164779825f4dSBenjamin Herrenschmidt spapr->patb_entry = PATE1_GR; 164800fd075eSBenjamin Herrenschmidt spapr_set_all_lpcrs(LPCR_HR | LPCR_UPRT, LPCR_HR | LPCR_UPRT); 1649b4db5413SSuraj Jitindar Singh } else { 16508897ea5aSDavid Gibson spapr_setup_hpt(spapr); 1651c5f54f3eSDavid Gibson } 165253018216SPaolo Bonzini 16537966d70fSJason A. 
Donenfeld qemu_devices_reset(reason); 165425c9780dSDavid Gibson 16559012a53fSGreg Kurz spapr_ovec_cleanup(spapr->ov5_cas); 16569012a53fSGreg Kurz spapr->ov5_cas = spapr_ovec_new(); 16579012a53fSGreg Kurz 1658ce03a193SLaurent Vivier ppc_set_compat_all(spapr->max_compat_pvr, &error_fatal); 16599012a53fSGreg Kurz 1660ec132efaSAlexey Kardashevskiy /* 1661b2e22477SCédric Le Goater * This is fixing some of the default configuration of the XIVE 1662b2e22477SCédric Le Goater * devices. To be called after the reset of the machine devices. 1663b2e22477SCédric Le Goater */ 1664b2e22477SCédric Le Goater spapr_irq_reset(spapr, &error_fatal); 1665b2e22477SCédric Le Goater 166623ff81bdSGreg Kurz /* 166723ff81bdSGreg Kurz * There is no CAS under qtest. Simulate one to please the code that 166823ff81bdSGreg Kurz * depends on spapr->ov5_cas. This is especially needed to test device 166923ff81bdSGreg Kurz * unplug, so we do that before resetting the DRCs. 167023ff81bdSGreg Kurz */ 167123ff81bdSGreg Kurz if (qtest_enabled()) { 167223ff81bdSGreg Kurz spapr_ovec_cleanup(spapr->ov5_cas); 167323ff81bdSGreg Kurz spapr->ov5_cas = spapr_ovec_clone(spapr->ov5); 167423ff81bdSGreg Kurz } 167523ff81bdSGreg Kurz 1676b5513584SShivaprasad G Bhat spapr_nvdimm_finish_flushes(); 1677b5513584SShivaprasad G Bhat 167882512483SGreg Kurz /* DRC reset may cause a device to be unplugged. This will cause troubles 167982512483SGreg Kurz * if this device is used by another device (eg, a running vhost backend 168082512483SGreg Kurz * will crash QEMU if the DIMM holding the vring goes away). To avoid such 168182512483SGreg Kurz * situations, we reset DRCs after all devices have been reset. 
168282512483SGreg Kurz */ 168311055041SGreg Kurz spapr_drc_reset_all(spapr); 168482512483SGreg Kurz 168556258174SDaniel Henrique Barboza spapr_clear_pending_events(spapr); 168653018216SPaolo Bonzini 1687b7d1f77aSBenjamin Herrenschmidt /* 16884b98e72dSAlexey Kardashevskiy * We place the device tree just below either the top of the RMA, 1689df269271SAlexey Kardashevskiy * or just below 2GB, whichever is lower, so that it can be 1690b7d1f77aSBenjamin Herrenschmidt * processed with 32-bit real mode code if necessary 1691b7d1f77aSBenjamin Herrenschmidt */ 16924b98e72dSAlexey Kardashevskiy fdt_addr = MIN(spapr->rma_size, FDT_MAX_ADDR) - FDT_MAX_SIZE; 1693b7d1f77aSBenjamin Herrenschmidt 169497b32a6aSDavid Gibson fdt = spapr_build_fdt(spapr, true, FDT_MAX_SIZE); 1695fc8c745dSAlexey Kardashevskiy if (spapr->vof) { 169621bde1ecSAlexey Kardashevskiy spapr_vof_reset(spapr, fdt, &error_fatal); 1697fc8c745dSAlexey Kardashevskiy /* 1698fc8c745dSAlexey Kardashevskiy * Do not pack the FDT as the client may change properties. 1699fc8c745dSAlexey Kardashevskiy * VOF client does not expect the FDT so we do not load it to the VM. 
1700fc8c745dSAlexey Kardashevskiy */ 1701fc8c745dSAlexey Kardashevskiy } else { 1702997b6cfcSDavid Gibson rc = fdt_pack(fdt); 1703997b6cfcSDavid Gibson /* Should only fail if we've built a corrupted tree */ 1704997b6cfcSDavid Gibson assert(rc == 0); 1705997b6cfcSDavid Gibson 1706fc8c745dSAlexey Kardashevskiy spapr_cpu_set_entry_state(first_ppc_cpu, SPAPR_ENTRY_POINT, 1707fc8c745dSAlexey Kardashevskiy 0, fdt_addr, 0); 1708cae172abSDavid Gibson cpu_physical_memory_write(fdt_addr, fdt, fdt_totalsize(fdt)); 1709fc8c745dSAlexey Kardashevskiy } 1710fc8c745dSAlexey Kardashevskiy qemu_fdt_dumpdtb(fdt, fdt_totalsize(fdt)); 1711fc8c745dSAlexey Kardashevskiy 1712fea35ca4SAlexey Kardashevskiy g_free(spapr->fdt_blob); 1713fea35ca4SAlexey Kardashevskiy spapr->fdt_size = fdt_totalsize(fdt); 1714fea35ca4SAlexey Kardashevskiy spapr->fdt_initial_size = spapr->fdt_size; 1715fea35ca4SAlexey Kardashevskiy spapr->fdt_blob = fdt; 1716997b6cfcSDavid Gibson 1717d890f2faSDaniel Henrique Barboza /* Set machine->fdt for 'dumpdtb' QMP/HMP command */ 1718d890f2faSDaniel Henrique Barboza machine->fdt = fdt; 1719d890f2faSDaniel Henrique Barboza 172053018216SPaolo Bonzini /* Set up the entry state */ 1721182735efSAndreas Färber first_ppc_cpu->env.gpr[5] = 0; 172253018216SPaolo Bonzini 1723edfdbf9cSNicholas Piggin spapr->fwnmi_system_reset_addr = -1; 17248af7e1feSNicholas Piggin spapr->fwnmi_machine_check_addr = -1; 17258af7e1feSNicholas Piggin spapr->fwnmi_machine_check_interlock = -1; 17269ac703acSAravinda Prasad 17279ac703acSAravinda Prasad /* Signal all vCPUs waiting on this condition */ 17288af7e1feSNicholas Piggin qemu_cond_broadcast(&spapr->fwnmi_machine_check_interlock_cond); 17292500fb42SAravinda Prasad 17302500fb42SAravinda Prasad migrate_del_blocker(spapr->fwnmi_migration_blocker); 173153018216SPaolo Bonzini } 173253018216SPaolo Bonzini 1733ce2918cbSDavid Gibson static void spapr_create_nvram(SpaprMachineState *spapr) 173453018216SPaolo Bonzini { 17353e80f690SMarkus Armbruster 
    DeviceState *dev = qdev_new("spapr-nvram");
    /* Legacy -pflash drive 0, if given, backs the NVRAM contents */
    DriveInfo *dinfo = drive_get(IF_PFLASH, 0, 0);

    if (dinfo) {
        qdev_prop_set_drive_err(dev, "drive", blk_by_legacy_dinfo(dinfo),
                                &error_fatal);
    }

    /* NVRAM is a VIO device; realize it on the machine's VIO bus */
    qdev_realize_and_unref(dev, &spapr->vio_bus->bus, &error_fatal);

    spapr->nvram = (struct SpaprNvram *)dev;
}

/* Create the PAPR RTC device as a child of the machine and expose its
 * "date" property as the machine-level "rtc-time" alias. */
static void spapr_rtc_create(SpaprMachineState *spapr)
{
    object_initialize_child_with_props(OBJECT(spapr), "rtc", &spapr->rtc,
                                       sizeof(spapr->rtc), TYPE_SPAPR_RTC,
                                       &error_fatal, NULL);
    qdev_realize(DEVICE(&spapr->rtc), NULL, &error_fatal);
    object_property_add_alias(OBJECT(spapr), "rtc-time", OBJECT(&spapr->rtc),
                              "date");
}

/* Returns whether we want to use VGA or not */
static bool spapr_vga_init(PCIBus *pci_bus, Error **errp)
{
    vga_interface_created = true;
    switch (vga_interface_type) {
    case VGA_NONE:
        return false;
    case VGA_DEVICE:
        return true;
    case VGA_STD:
    case VGA_VIRTIO:
    case VGA_CIRRUS:
        return pci_vga_init(pci_bus) != NULL;
    default:
        /* NOTE(review): the switch above also accepts VGA_CIRRUS, but the
         * message only mentions std/virtio — confirm whether cirrus support
         * is intentional before relying on this text. */
        error_setg(errp,
                   "Unsupported VGA mode, only -vga std or -vga virtio is supported");
        return false;
    }
}

/* Migration pre-load hook: currently only delegates to the spapr
 * capabilities pre-load handling. */
static int spapr_pre_load(void *opaque)
{
    int rc;

    rc = spapr_caps_pre_load(opaque);
    if (rc) {
        return rc;
    }

    return 0;
}

/* Migration post-load hook for the top-level "spapr" vmstate.
 * Re-validates capabilities, fixes up legacy RTC state, reprograms the
 * KVM MMU from the migrated partition table entry, and restores the
 * interrupt controller state. Returns 0 on success, negative errno on
 * failure (which aborts the incoming migration). */
static int spapr_post_load(void *opaque, int version_id)
{
    SpaprMachineState *spapr = (SpaprMachineState *)opaque;
    int err = 0;

    err = spapr_caps_post_migration(spapr);
    if (err) {
        return err;
    }

    /*
     * In earlier versions, there was no separate qdev for the PAPR
     * RTC, so the RTC offset was stored directly in sPAPREnvironment.
     * So when migrating from those versions, poke the incoming offset
     * value into the RTC device
     */
    if (version_id < 3) {
        err = spapr_rtc_import_offset(&spapr->rtc, spapr->rtc_offset);
        if (err) {
            return err;
        }
    }

    /* A non-zero patb_entry means the guest negotiated a POWER9-style MMU
     * (radix or hash-v3); tell KVM about it so the host MMU matches. */
    if (kvm_enabled() && spapr->patb_entry) {
        PowerPCCPU *cpu = POWERPC_CPU(first_cpu);
        bool radix = !!(spapr->patb_entry & PATE1_GR);
        bool gtse = !!(cpu->env.spr[SPR_LPCR] & LPCR_GTSE);

        /*
         * Update LPCR:HR and UPRT as they may not be set properly in
         * the stream
         */
        spapr_set_all_lpcrs(radix ? (LPCR_HR | LPCR_UPRT) : 0,
                            LPCR_HR | LPCR_UPRT);

        err = kvmppc_configure_v3_mmu(cpu, radix, gtse, spapr->patb_entry);
        if (err) {
            error_report("Process table config unsupported by the host");
            return -EINVAL;
        }
    }

    err = spapr_irq_post_load(spapr, version_id);
    if (err) {
        return err;
    }

    return err;
}

/* Migration pre-save hook: currently only delegates to the spapr
 * capabilities pre-save handling. */
static int spapr_pre_save(void *opaque)
{
    int rc;

    rc = spapr_caps_pre_save(opaque);
    if (rc) {
        return rc;
    }

    return 0;
}

/* VMState field test: true for streams older than version 3 (used to
 * skip/accept fields that only exist in old machine versions). */
static bool version_before_3(void *opaque, int version_id)
{
    return version_id < 3;
}

/* Subsection "needed" test: only migrate the pending-events list when
 * there are queued RTAS events. */
static bool spapr_pending_events_needed(void *opaque)
{
    SpaprMachineState *spapr = (SpaprMachineState *)opaque;
    return !QTAILQ_EMPTY(&spapr->pending_events);
}

/* Migration layout of a single queued RTAS event log entry */
static const VMStateDescription vmstate_spapr_event_entry = {
1864fd38804bSDaniel Henrique Barboza .name = "spapr_event_log_entry", 1865fd38804bSDaniel Henrique Barboza .version_id = 1, 1866fd38804bSDaniel Henrique Barboza .minimum_version_id = 1, 1867fd38804bSDaniel Henrique Barboza .fields = (VMStateField[]) { 1868ce2918cbSDavid Gibson VMSTATE_UINT32(summary, SpaprEventLogEntry), 1869ce2918cbSDavid Gibson VMSTATE_UINT32(extended_length, SpaprEventLogEntry), 1870ce2918cbSDavid Gibson VMSTATE_VBUFFER_ALLOC_UINT32(extended_log, SpaprEventLogEntry, 0, 18715341258eSDavid Gibson NULL, extended_length), 1872fd38804bSDaniel Henrique Barboza VMSTATE_END_OF_LIST() 1873fd38804bSDaniel Henrique Barboza }, 1874fd38804bSDaniel Henrique Barboza }; 1875fd38804bSDaniel Henrique Barboza 1876fd38804bSDaniel Henrique Barboza static const VMStateDescription vmstate_spapr_pending_events = { 1877fd38804bSDaniel Henrique Barboza .name = "spapr_pending_events", 1878fd38804bSDaniel Henrique Barboza .version_id = 1, 1879fd38804bSDaniel Henrique Barboza .minimum_version_id = 1, 1880fd38804bSDaniel Henrique Barboza .needed = spapr_pending_events_needed, 1881fd38804bSDaniel Henrique Barboza .fields = (VMStateField[]) { 1882ce2918cbSDavid Gibson VMSTATE_QTAILQ_V(pending_events, SpaprMachineState, 1, 1883ce2918cbSDavid Gibson vmstate_spapr_event_entry, SpaprEventLogEntry, next), 1884fd38804bSDaniel Henrique Barboza VMSTATE_END_OF_LIST() 1885fd38804bSDaniel Henrique Barboza }, 1886fd38804bSDaniel Henrique Barboza }; 1887fd38804bSDaniel Henrique Barboza 188862ef3760SMichael Roth static bool spapr_ov5_cas_needed(void *opaque) 188962ef3760SMichael Roth { 1890ce2918cbSDavid Gibson SpaprMachineState *spapr = opaque; 1891ce2918cbSDavid Gibson SpaprOptionVector *ov5_mask = spapr_ovec_new(); 189262ef3760SMichael Roth bool cas_needed; 189362ef3760SMichael Roth 1894ce2918cbSDavid Gibson /* Prior to the introduction of SpaprOptionVector, we had two option 189562ef3760SMichael Roth * vectors we dealt with: OV5_FORM1_AFFINITY, and OV5_DRCONF_MEMORY. 
189662ef3760SMichael Roth * Both of these options encode machine topology into the device-tree 189762ef3760SMichael Roth * in such a way that the now-booted OS should still be able to interact 189862ef3760SMichael Roth * appropriately with QEMU regardless of what options were actually 189962ef3760SMichael Roth * negotiatied on the source side. 190062ef3760SMichael Roth * 190162ef3760SMichael Roth * As such, we can avoid migrating the CAS-negotiated options if these 190262ef3760SMichael Roth * are the only options available on the current machine/platform. 190362ef3760SMichael Roth * Since these are the only options available for pseries-2.7 and 190462ef3760SMichael Roth * earlier, this allows us to maintain old->new/new->old migration 190562ef3760SMichael Roth * compatibility. 190662ef3760SMichael Roth * 190762ef3760SMichael Roth * For QEMU 2.8+, there are additional CAS-negotiatable options available 190862ef3760SMichael Roth * via default pseries-2.8 machines and explicit command-line parameters. 190962ef3760SMichael Roth * Some of these options, like OV5_HP_EVT, *do* require QEMU to be aware 191062ef3760SMichael Roth * of the actual CAS-negotiated values to continue working properly. For 191162ef3760SMichael Roth * example, availability of memory unplug depends on knowing whether 191262ef3760SMichael Roth * OV5_HP_EVT was negotiated via CAS. 191362ef3760SMichael Roth * 191462ef3760SMichael Roth * Thus, for any cases where the set of available CAS-negotiatable 191562ef3760SMichael Roth * options extends beyond OV5_FORM1_AFFINITY and OV5_DRCONF_MEMORY, we 1916aef19c04SGreg Kurz * include the CAS-negotiated options in the migration stream, unless 1917aef19c04SGreg Kurz * if they affect boot time behaviour only. 
191862ef3760SMichael Roth */ 191962ef3760SMichael Roth spapr_ovec_set(ov5_mask, OV5_FORM1_AFFINITY); 192062ef3760SMichael Roth spapr_ovec_set(ov5_mask, OV5_DRCONF_MEMORY); 1921aef19c04SGreg Kurz spapr_ovec_set(ov5_mask, OV5_DRMEM_V2); 192262ef3760SMichael Roth 1923d1d32d62SDavid Gibson /* We need extra information if we have any bits outside the mask 1924d1d32d62SDavid Gibson * defined above */ 1925d1d32d62SDavid Gibson cas_needed = !spapr_ovec_subset(spapr->ov5, ov5_mask); 192662ef3760SMichael Roth 192762ef3760SMichael Roth spapr_ovec_cleanup(ov5_mask); 192862ef3760SMichael Roth 192962ef3760SMichael Roth return cas_needed; 193062ef3760SMichael Roth } 193162ef3760SMichael Roth 193262ef3760SMichael Roth static const VMStateDescription vmstate_spapr_ov5_cas = { 193362ef3760SMichael Roth .name = "spapr_option_vector_ov5_cas", 193462ef3760SMichael Roth .version_id = 1, 193562ef3760SMichael Roth .minimum_version_id = 1, 193662ef3760SMichael Roth .needed = spapr_ov5_cas_needed, 193762ef3760SMichael Roth .fields = (VMStateField[]) { 1938ce2918cbSDavid Gibson VMSTATE_STRUCT_POINTER_V(ov5_cas, SpaprMachineState, 1, 1939ce2918cbSDavid Gibson vmstate_spapr_ovec, SpaprOptionVector), 194062ef3760SMichael Roth VMSTATE_END_OF_LIST() 194162ef3760SMichael Roth }, 194262ef3760SMichael Roth }; 194362ef3760SMichael Roth 19449861bb3eSSuraj Jitindar Singh static bool spapr_patb_entry_needed(void *opaque) 19459861bb3eSSuraj Jitindar Singh { 1946ce2918cbSDavid Gibson SpaprMachineState *spapr = opaque; 19479861bb3eSSuraj Jitindar Singh 19489861bb3eSSuraj Jitindar Singh return !!spapr->patb_entry; 19499861bb3eSSuraj Jitindar Singh } 19509861bb3eSSuraj Jitindar Singh 19519861bb3eSSuraj Jitindar Singh static const VMStateDescription vmstate_spapr_patb_entry = { 19529861bb3eSSuraj Jitindar Singh .name = "spapr_patb_entry", 19539861bb3eSSuraj Jitindar Singh .version_id = 1, 19549861bb3eSSuraj Jitindar Singh .minimum_version_id = 1, 19559861bb3eSSuraj Jitindar Singh .needed = 
spapr_patb_entry_needed, 19569861bb3eSSuraj Jitindar Singh .fields = (VMStateField[]) { 1957ce2918cbSDavid Gibson VMSTATE_UINT64(patb_entry, SpaprMachineState), 19589861bb3eSSuraj Jitindar Singh VMSTATE_END_OF_LIST() 19599861bb3eSSuraj Jitindar Singh }, 19609861bb3eSSuraj Jitindar Singh }; 19619861bb3eSSuraj Jitindar Singh 196282cffa2eSCédric Le Goater static bool spapr_irq_map_needed(void *opaque) 196382cffa2eSCédric Le Goater { 1964ce2918cbSDavid Gibson SpaprMachineState *spapr = opaque; 196582cffa2eSCédric Le Goater 196682cffa2eSCédric Le Goater return spapr->irq_map && !bitmap_empty(spapr->irq_map, spapr->irq_map_nr); 196782cffa2eSCédric Le Goater } 196882cffa2eSCédric Le Goater 196982cffa2eSCédric Le Goater static const VMStateDescription vmstate_spapr_irq_map = { 197082cffa2eSCédric Le Goater .name = "spapr_irq_map", 197182cffa2eSCédric Le Goater .version_id = 1, 197282cffa2eSCédric Le Goater .minimum_version_id = 1, 197382cffa2eSCédric Le Goater .needed = spapr_irq_map_needed, 197482cffa2eSCédric Le Goater .fields = (VMStateField[]) { 1975ce2918cbSDavid Gibson VMSTATE_BITMAP(irq_map, SpaprMachineState, 0, irq_map_nr), 197682cffa2eSCédric Le Goater VMSTATE_END_OF_LIST() 197782cffa2eSCédric Le Goater }, 197882cffa2eSCédric Le Goater }; 197982cffa2eSCédric Le Goater 1980fea35ca4SAlexey Kardashevskiy static bool spapr_dtb_needed(void *opaque) 1981fea35ca4SAlexey Kardashevskiy { 1982ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(opaque); 1983fea35ca4SAlexey Kardashevskiy 1984fea35ca4SAlexey Kardashevskiy return smc->update_dt_enabled; 1985fea35ca4SAlexey Kardashevskiy } 1986fea35ca4SAlexey Kardashevskiy 1987fea35ca4SAlexey Kardashevskiy static int spapr_dtb_pre_load(void *opaque) 1988fea35ca4SAlexey Kardashevskiy { 1989ce2918cbSDavid Gibson SpaprMachineState *spapr = (SpaprMachineState *)opaque; 1990fea35ca4SAlexey Kardashevskiy 1991fea35ca4SAlexey Kardashevskiy g_free(spapr->fdt_blob); 1992fea35ca4SAlexey Kardashevskiy spapr->fdt_blob = 
NULL; 1993fea35ca4SAlexey Kardashevskiy spapr->fdt_size = 0; 1994fea35ca4SAlexey Kardashevskiy 1995fea35ca4SAlexey Kardashevskiy return 0; 1996fea35ca4SAlexey Kardashevskiy } 1997fea35ca4SAlexey Kardashevskiy 1998fea35ca4SAlexey Kardashevskiy static const VMStateDescription vmstate_spapr_dtb = { 1999fea35ca4SAlexey Kardashevskiy .name = "spapr_dtb", 2000fea35ca4SAlexey Kardashevskiy .version_id = 1, 2001fea35ca4SAlexey Kardashevskiy .minimum_version_id = 1, 2002fea35ca4SAlexey Kardashevskiy .needed = spapr_dtb_needed, 2003fea35ca4SAlexey Kardashevskiy .pre_load = spapr_dtb_pre_load, 2004fea35ca4SAlexey Kardashevskiy .fields = (VMStateField[]) { 2005ce2918cbSDavid Gibson VMSTATE_UINT32(fdt_initial_size, SpaprMachineState), 2006ce2918cbSDavid Gibson VMSTATE_UINT32(fdt_size, SpaprMachineState), 2007ce2918cbSDavid Gibson VMSTATE_VBUFFER_ALLOC_UINT32(fdt_blob, SpaprMachineState, 0, NULL, 2008fea35ca4SAlexey Kardashevskiy fdt_size), 2009fea35ca4SAlexey Kardashevskiy VMSTATE_END_OF_LIST() 2010fea35ca4SAlexey Kardashevskiy }, 2011fea35ca4SAlexey Kardashevskiy }; 2012fea35ca4SAlexey Kardashevskiy 20132500fb42SAravinda Prasad static bool spapr_fwnmi_needed(void *opaque) 20142500fb42SAravinda Prasad { 20152500fb42SAravinda Prasad SpaprMachineState *spapr = (SpaprMachineState *)opaque; 20162500fb42SAravinda Prasad 20178af7e1feSNicholas Piggin return spapr->fwnmi_machine_check_addr != -1; 20182500fb42SAravinda Prasad } 20192500fb42SAravinda Prasad 20202500fb42SAravinda Prasad static int spapr_fwnmi_pre_save(void *opaque) 20212500fb42SAravinda Prasad { 20222500fb42SAravinda Prasad SpaprMachineState *spapr = (SpaprMachineState *)opaque; 20232500fb42SAravinda Prasad 20242500fb42SAravinda Prasad /* 20252500fb42SAravinda Prasad * Check if machine check handling is in progress and print a 20262500fb42SAravinda Prasad * warning message. 
20272500fb42SAravinda Prasad */ 20288af7e1feSNicholas Piggin if (spapr->fwnmi_machine_check_interlock != -1) { 20292500fb42SAravinda Prasad warn_report("A machine check is being handled during migration. The" 20302500fb42SAravinda Prasad "handler may run and log hardware error on the destination"); 20312500fb42SAravinda Prasad } 20322500fb42SAravinda Prasad 20332500fb42SAravinda Prasad return 0; 20342500fb42SAravinda Prasad } 20352500fb42SAravinda Prasad 20368af7e1feSNicholas Piggin static const VMStateDescription vmstate_spapr_fwnmi = { 20378af7e1feSNicholas Piggin .name = "spapr_fwnmi", 20382500fb42SAravinda Prasad .version_id = 1, 20392500fb42SAravinda Prasad .minimum_version_id = 1, 20402500fb42SAravinda Prasad .needed = spapr_fwnmi_needed, 20412500fb42SAravinda Prasad .pre_save = spapr_fwnmi_pre_save, 20422500fb42SAravinda Prasad .fields = (VMStateField[]) { 2043edfdbf9cSNicholas Piggin VMSTATE_UINT64(fwnmi_system_reset_addr, SpaprMachineState), 20448af7e1feSNicholas Piggin VMSTATE_UINT64(fwnmi_machine_check_addr, SpaprMachineState), 20458af7e1feSNicholas Piggin VMSTATE_INT32(fwnmi_machine_check_interlock, SpaprMachineState), 20462500fb42SAravinda Prasad VMSTATE_END_OF_LIST() 20472500fb42SAravinda Prasad }, 20482500fb42SAravinda Prasad }; 20492500fb42SAravinda Prasad 20504be21d56SDavid Gibson static const VMStateDescription vmstate_spapr = { 20514be21d56SDavid Gibson .name = "spapr", 2052880ae7deSDavid Gibson .version_id = 3, 20534be21d56SDavid Gibson .minimum_version_id = 1, 20544e5fe368SSuraj Jitindar Singh .pre_load = spapr_pre_load, 2055880ae7deSDavid Gibson .post_load = spapr_post_load, 20564e5fe368SSuraj Jitindar Singh .pre_save = spapr_pre_save, 20574be21d56SDavid Gibson .fields = (VMStateField[]) { 2058880ae7deSDavid Gibson /* used to be @next_irq */ 2059880ae7deSDavid Gibson VMSTATE_UNUSED_BUFFER(version_before_3, 0, 4), 20604be21d56SDavid Gibson 20614be21d56SDavid Gibson /* RTC offset */ 2062ce2918cbSDavid Gibson VMSTATE_UINT64_TEST(rtc_offset, 
SpaprMachineState, version_before_3), 2063880ae7deSDavid Gibson 2064ce2918cbSDavid Gibson VMSTATE_PPC_TIMEBASE_V(tb, SpaprMachineState, 2), 20654be21d56SDavid Gibson VMSTATE_END_OF_LIST() 20664be21d56SDavid Gibson }, 206762ef3760SMichael Roth .subsections = (const VMStateDescription*[]) { 206862ef3760SMichael Roth &vmstate_spapr_ov5_cas, 20699861bb3eSSuraj Jitindar Singh &vmstate_spapr_patb_entry, 2070fd38804bSDaniel Henrique Barboza &vmstate_spapr_pending_events, 20714e5fe368SSuraj Jitindar Singh &vmstate_spapr_cap_htm, 20724e5fe368SSuraj Jitindar Singh &vmstate_spapr_cap_vsx, 20734e5fe368SSuraj Jitindar Singh &vmstate_spapr_cap_dfp, 20748f38eaf8SSuraj Jitindar Singh &vmstate_spapr_cap_cfpc, 207509114fd8SSuraj Jitindar Singh &vmstate_spapr_cap_sbbc, 20764be8d4e7SSuraj Jitindar Singh &vmstate_spapr_cap_ibs, 207764d4a534SDavid Gibson &vmstate_spapr_cap_hpt_maxpagesize, 207882cffa2eSCédric Le Goater &vmstate_spapr_irq_map, 2079b9a477b7SSuraj Jitindar Singh &vmstate_spapr_cap_nested_kvm_hv, 2080fea35ca4SAlexey Kardashevskiy &vmstate_spapr_dtb, 2081c982f5cfSSuraj Jitindar Singh &vmstate_spapr_cap_large_decr, 20828ff43ee4SSuraj Jitindar Singh &vmstate_spapr_cap_ccf_assist, 20839d953ce4SAravinda Prasad &vmstate_spapr_cap_fwnmi, 20848af7e1feSNicholas Piggin &vmstate_spapr_fwnmi, 208582123b75SBharata B Rao &vmstate_spapr_cap_rpt_invalidate, 208662ef3760SMichael Roth NULL 208762ef3760SMichael Roth } 20884be21d56SDavid Gibson }; 20894be21d56SDavid Gibson 20904be21d56SDavid Gibson static int htab_save_setup(QEMUFile *f, void *opaque) 20914be21d56SDavid Gibson { 2092ce2918cbSDavid Gibson SpaprMachineState *spapr = opaque; 20934be21d56SDavid Gibson 20944be21d56SDavid Gibson /* "Iteration" header */ 20953a384297SBharata B Rao if (!spapr->htab_shift) { 20963a384297SBharata B Rao qemu_put_be32(f, -1); 20973a384297SBharata B Rao } else { 20984be21d56SDavid Gibson qemu_put_be32(f, spapr->htab_shift); 20993a384297SBharata B Rao } 21004be21d56SDavid Gibson 2101e68cb8b4SAlexey 
    if (spapr->htab) {
        /* QEMU-managed HPT: start the save state machine from slot 0 */
        spapr->htab_save_index = 0;
        spapr->htab_first_pass = true;
    } else {
        /* KVM owns the HPT; it will be streamed via the htab fd */
        if (spapr->htab_shift) {
            assert(kvm_enabled());
        }
    }


    return 0;
}

/* Emit one chunk of HPTEs: header (start slot, valid count, invalid
 * count) followed by the raw bytes of the n_valid valid entries. */
static void htab_save_chunk(QEMUFile *f, SpaprMachineState *spapr,
                            int chunkstart, int n_valid, int n_invalid)
{
    qemu_put_be32(f, chunkstart);
    qemu_put_be16(f, n_valid);
    qemu_put_be16(f, n_invalid);
    qemu_put_buffer(f, HPTE(spapr->htab, chunkstart),
                    HASH_PTE_SIZE_64 * n_valid);
}

/* All-zero chunk header marks the end of one iteration's stream */
static void htab_save_end_marker(QEMUFile *f)
{
    qemu_put_be32(f, 0);
    qemu_put_be16(f, 0);
    qemu_put_be16(f, 0);
}

/* First pass over the HPT: send every valid entry (clearing its dirty
 * bit as we go), skipping invalid ones. Stops early when max_ns elapses
 * or the migration bandwidth limit is hit; progress is kept in
 * spapr->htab_save_index so the next call resumes where we left off.
 * max_ns == -1 means "no time limit" (used by the completion phase). */
static void htab_save_first_pass(QEMUFile *f, SpaprMachineState *spapr,
                                 int64_t max_ns)
{
    bool has_timeout = max_ns != -1;
    int htabslots = HTAB_SIZE(spapr) / HASH_PTE_SIZE_64;
    int index = spapr->htab_save_index;
    int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    assert(spapr->htab_first_pass);

    do {
        int chunkstart;

        /* Consume invalid HPTEs */
        while ((index < htabslots)
               && !HPTE_VALID(HPTE(spapr->htab, index))) {
            CLEAN_HPTE(HPTE(spapr->htab, index));
            index++;
        }

        /* Consume valid HPTEs */
        chunkstart = index;
        /* chunk size is bounded by USHRT_MAX because n_valid is sent
         * as a 16-bit field in the chunk header */
        while ((index < htabslots) && (index - chunkstart < USHRT_MAX)
               && HPTE_VALID(HPTE(spapr->htab, index))) {
            CLEAN_HPTE(HPTE(spapr->htab, index));
            index++;
        }

        if (index > chunkstart) {
            int n_valid = index - chunkstart;

            htab_save_chunk(f, spapr, chunkstart, n_valid, 0);

            if (has_timeout &&
                (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) > max_ns) {
                break;
            }
        }
    } while ((index < htabslots) && !migration_rate_exceeded(f));

    if (index >= htabslots) {
        /* Whole table covered once: switch to dirty-tracking passes */
        assert(index == htabslots);
        index = 0;
        spapr->htab_first_pass = false;
    }
    spapr->htab_save_index = index;
}

/* Subsequent passes: send only entries dirtied since they were last
 * sent. A chunk is a run of dirty-valid entries followed by a run of
 * dirty-invalid ones. Returns 1 when a complete sweep found nothing to
 * send (migration of the HPT has converged), else 0. max_ns < 0 means
 * "final pass": ignore both the time and bandwidth limits. */
static int htab_save_later_pass(QEMUFile *f, SpaprMachineState *spapr,
                                int64_t max_ns)
{
    bool final = max_ns < 0;
    int htabslots = HTAB_SIZE(spapr) / HASH_PTE_SIZE_64;
    int examined = 0, sent = 0;
    int index = spapr->htab_save_index;
    int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    assert(!spapr->htab_first_pass);

    do {
        int chunkstart, invalidstart;

        /* Consume non-dirty HPTEs */
        while ((index < htabslots)
               && !HPTE_DIRTY(HPTE(spapr->htab, index))) {
            index++;
            examined++;
        }

        chunkstart = index;
        /* Consume valid dirty HPTEs */
        while ((index < htabslots) && (index - chunkstart < USHRT_MAX)
               && HPTE_DIRTY(HPTE(spapr->htab, index))
               && HPTE_VALID(HPTE(spapr->htab, index))) {
            CLEAN_HPTE(HPTE(spapr->htab, index));
            index++;
            examined++;
        }

        invalidstart = index;
        /* Consume invalid dirty HPTEs */
        while ((index < htabslots) && (index - invalidstart < USHRT_MAX)
               && HPTE_DIRTY(HPTE(spapr->htab, index))
               && !HPTE_VALID(HPTE(spapr->htab, index))) {
            CLEAN_HPTE(HPTE(spapr->htab, index));
            index++;
            examined++;
        }

        if (index > chunkstart) {
            int n_valid = invalidstart - chunkstart;
            int n_invalid = index - invalidstart;

            htab_save_chunk(f, spapr, chunkstart, n_valid, n_invalid);
            sent += index - chunkstart;

            if (!final && (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) > max_ns) {
                break;
            }
        }

        if (examined >= htabslots) {
            break;
        }

        if (index >= htabslots) {
            /* Wrap around: a pass may start mid-table */
            assert(index == htabslots);
            index = 0;
        }
    } while ((examined < htabslots) && (!migration_rate_exceeded(f) || final));

    if (index >= htabslots) {
        assert(index == htabslots);
        index = 0;
    }

    spapr->htab_save_index = index;

    return (examined >= htabslots) && (sent == 0) ? 1 : 0;
}

#define MAX_ITERATION_NS    5000000 /* 5 ms */
#define MAX_KVM_BUF_SIZE    2048

/* Live-migration iterate hook for the HPT. Returns 1 when done (no HPT,
 * or a later pass converged), 0 to keep iterating, negative on error. */
static int htab_save_iterate(QEMUFile *f, void *opaque)
{
    SpaprMachineState *spapr = opaque;
    int fd;
    int rc = 0;

    /* Iteration header */
    if (!spapr->htab_shift) {
        qemu_put_be32(f, -1);
        return 1;
    } else {
        qemu_put_be32(f, 0);
    }

    if (!spapr->htab) {
        /* KVM-owned HPT: stream it through the kernel's htab fd */
        assert(kvm_enabled());

        fd = get_htab_fd(spapr);
        if (fd < 0) {
            return fd;
        }

        rc = kvmppc_save_htab(f, fd, MAX_KVM_BUF_SIZE, MAX_ITERATION_NS);
        if (rc < 0) {
            return rc;
        }
    } else if (spapr->htab_first_pass) {
        htab_save_first_pass(f, spapr, MAX_ITERATION_NS);
    } else {
        rc = htab_save_later_pass(f, spapr, MAX_ITERATION_NS);
    }

    htab_save_end_marker(f);

    return rc;
}

/* Completion hook: flush all remaining HPT state with no time limit. */
static int htab_save_complete(QEMUFile *f, void *opaque)
22934be21d56SDavid Gibson { 2294ce2918cbSDavid Gibson SpaprMachineState *spapr = opaque; 2295715c5407SDavid Gibson int fd; 22964be21d56SDavid Gibson 22974be21d56SDavid Gibson /* Iteration header */ 22983a384297SBharata B Rao if (!spapr->htab_shift) { 22993a384297SBharata B Rao qemu_put_be32(f, -1); 23003a384297SBharata B Rao return 0; 23013a384297SBharata B Rao } else { 23024be21d56SDavid Gibson qemu_put_be32(f, 0); 23033a384297SBharata B Rao } 23044be21d56SDavid Gibson 2305e68cb8b4SAlexey Kardashevskiy if (!spapr->htab) { 2306e68cb8b4SAlexey Kardashevskiy int rc; 2307e68cb8b4SAlexey Kardashevskiy 2308e68cb8b4SAlexey Kardashevskiy assert(kvm_enabled()); 2309e68cb8b4SAlexey Kardashevskiy 2310715c5407SDavid Gibson fd = get_htab_fd(spapr); 2311715c5407SDavid Gibson if (fd < 0) { 2312715c5407SDavid Gibson return fd; 231301a57972SSamuel Mendoza-Jonas } 231401a57972SSamuel Mendoza-Jonas 2315715c5407SDavid Gibson rc = kvmppc_save_htab(f, fd, MAX_KVM_BUF_SIZE, -1); 2316e68cb8b4SAlexey Kardashevskiy if (rc < 0) { 2317e68cb8b4SAlexey Kardashevskiy return rc; 2318e68cb8b4SAlexey Kardashevskiy } 2319e68cb8b4SAlexey Kardashevskiy } else { 2320378bc217SDavid Gibson if (spapr->htab_first_pass) { 2321378bc217SDavid Gibson htab_save_first_pass(f, spapr, -1); 2322378bc217SDavid Gibson } 23234be21d56SDavid Gibson htab_save_later_pass(f, spapr, -1); 2324e68cb8b4SAlexey Kardashevskiy } 23254be21d56SDavid Gibson 23264be21d56SDavid Gibson /* End marker */ 2327332f7721SGreg Kurz htab_save_end_marker(f); 23284be21d56SDavid Gibson 23294be21d56SDavid Gibson return 0; 23304be21d56SDavid Gibson } 23314be21d56SDavid Gibson 23324be21d56SDavid Gibson static int htab_load(QEMUFile *f, void *opaque, int version_id) 23334be21d56SDavid Gibson { 2334ce2918cbSDavid Gibson SpaprMachineState *spapr = opaque; 23354be21d56SDavid Gibson uint32_t section_hdr; 2336e68cb8b4SAlexey Kardashevskiy int fd = -1; 233714b0d748SGreg Kurz Error *local_err = NULL; 23384be21d56SDavid Gibson 23394be21d56SDavid Gibson if 
(version_id < 1 || version_id > 1) { 234098a5d100SDavid Gibson error_report("htab_load() bad version"); 23414be21d56SDavid Gibson return -EINVAL; 23424be21d56SDavid Gibson } 23434be21d56SDavid Gibson 23444be21d56SDavid Gibson section_hdr = qemu_get_be32(f); 23454be21d56SDavid Gibson 23463a384297SBharata B Rao if (section_hdr == -1) { 23473a384297SBharata B Rao spapr_free_hpt(spapr); 23483a384297SBharata B Rao return 0; 23493a384297SBharata B Rao } 23503a384297SBharata B Rao 23514be21d56SDavid Gibson if (section_hdr) { 2352a4e3a7c0SGreg Kurz int ret; 2353a4e3a7c0SGreg Kurz 2354c5f54f3eSDavid Gibson /* First section gives the htab size */ 2355a4e3a7c0SGreg Kurz ret = spapr_reallocate_hpt(spapr, section_hdr, &local_err); 2356a4e3a7c0SGreg Kurz if (ret < 0) { 2357c5f54f3eSDavid Gibson error_report_err(local_err); 2358a4e3a7c0SGreg Kurz return ret; 23594be21d56SDavid Gibson } 23604be21d56SDavid Gibson return 0; 23614be21d56SDavid Gibson } 23624be21d56SDavid Gibson 2363e68cb8b4SAlexey Kardashevskiy if (!spapr->htab) { 2364e68cb8b4SAlexey Kardashevskiy assert(kvm_enabled()); 2365e68cb8b4SAlexey Kardashevskiy 236614b0d748SGreg Kurz fd = kvmppc_get_htab_fd(true, 0, &local_err); 2367e68cb8b4SAlexey Kardashevskiy if (fd < 0) { 236814b0d748SGreg Kurz error_report_err(local_err); 236982be8e73SGreg Kurz return fd; 2370e68cb8b4SAlexey Kardashevskiy } 2371e68cb8b4SAlexey Kardashevskiy } 2372e68cb8b4SAlexey Kardashevskiy 23734be21d56SDavid Gibson while (true) { 23744be21d56SDavid Gibson uint32_t index; 23754be21d56SDavid Gibson uint16_t n_valid, n_invalid; 23764be21d56SDavid Gibson 23774be21d56SDavid Gibson index = qemu_get_be32(f); 23784be21d56SDavid Gibson n_valid = qemu_get_be16(f); 23794be21d56SDavid Gibson n_invalid = qemu_get_be16(f); 23804be21d56SDavid Gibson 23814be21d56SDavid Gibson if ((index == 0) && (n_valid == 0) && (n_invalid == 0)) { 23824be21d56SDavid Gibson /* End of Stream */ 23834be21d56SDavid Gibson break; 23844be21d56SDavid Gibson } 23854be21d56SDavid Gibson 
2386e68cb8b4SAlexey Kardashevskiy if ((index + n_valid + n_invalid) > 23874be21d56SDavid Gibson (HTAB_SIZE(spapr) / HASH_PTE_SIZE_64)) { 23884be21d56SDavid Gibson /* Bad index in stream */ 238998a5d100SDavid Gibson error_report( 239098a5d100SDavid Gibson "htab_load() bad index %d (%hd+%hd entries) in htab stream (htab_shift=%d)", 239198a5d100SDavid Gibson index, n_valid, n_invalid, spapr->htab_shift); 23924be21d56SDavid Gibson return -EINVAL; 23934be21d56SDavid Gibson } 23944be21d56SDavid Gibson 2395e68cb8b4SAlexey Kardashevskiy if (spapr->htab) { 23964be21d56SDavid Gibson if (n_valid) { 23974be21d56SDavid Gibson qemu_get_buffer(f, HPTE(spapr->htab, index), 23984be21d56SDavid Gibson HASH_PTE_SIZE_64 * n_valid); 23994be21d56SDavid Gibson } 24004be21d56SDavid Gibson if (n_invalid) { 24014be21d56SDavid Gibson memset(HPTE(spapr->htab, index + n_valid), 0, 24024be21d56SDavid Gibson HASH_PTE_SIZE_64 * n_invalid); 24034be21d56SDavid Gibson } 2404e68cb8b4SAlexey Kardashevskiy } else { 2405e68cb8b4SAlexey Kardashevskiy int rc; 2406e68cb8b4SAlexey Kardashevskiy 2407e68cb8b4SAlexey Kardashevskiy assert(fd >= 0); 2408e68cb8b4SAlexey Kardashevskiy 24090a06e4d6SGreg Kurz rc = kvmppc_load_htab_chunk(f, fd, index, n_valid, n_invalid, 24100a06e4d6SGreg Kurz &local_err); 2411e68cb8b4SAlexey Kardashevskiy if (rc < 0) { 24120a06e4d6SGreg Kurz error_report_err(local_err); 2413e68cb8b4SAlexey Kardashevskiy return rc; 2414e68cb8b4SAlexey Kardashevskiy } 2415e68cb8b4SAlexey Kardashevskiy } 2416e68cb8b4SAlexey Kardashevskiy } 2417e68cb8b4SAlexey Kardashevskiy 2418e68cb8b4SAlexey Kardashevskiy if (!spapr->htab) { 2419e68cb8b4SAlexey Kardashevskiy assert(fd >= 0); 2420e68cb8b4SAlexey Kardashevskiy close(fd); 24214be21d56SDavid Gibson } 24224be21d56SDavid Gibson 24234be21d56SDavid Gibson return 0; 24244be21d56SDavid Gibson } 24254be21d56SDavid Gibson 242670f794fcSJuan Quintela static void htab_save_cleanup(void *opaque) 2427c573fc03SThomas Huth { 2428ce2918cbSDavid Gibson SpaprMachineState 
*spapr = opaque; 2429c573fc03SThomas Huth 2430c573fc03SThomas Huth close_htab_fd(spapr); 2431c573fc03SThomas Huth } 2432c573fc03SThomas Huth 24334be21d56SDavid Gibson static SaveVMHandlers savevm_htab_handlers = { 24349907e842SJuan Quintela .save_setup = htab_save_setup, 24354be21d56SDavid Gibson .save_live_iterate = htab_save_iterate, 2436a3e06c3dSDr. David Alan Gilbert .save_live_complete_precopy = htab_save_complete, 243770f794fcSJuan Quintela .save_cleanup = htab_save_cleanup, 24384be21d56SDavid Gibson .load_state = htab_load, 24394be21d56SDavid Gibson }; 24404be21d56SDavid Gibson 24415b2128d2SAlexander Graf static void spapr_boot_set(void *opaque, const char *boot_device, 24425b2128d2SAlexander Graf Error **errp) 24435b2128d2SAlexander Graf { 24443bf0844fSGreg Kurz SpaprMachineState *spapr = SPAPR_MACHINE(opaque); 24453bf0844fSGreg Kurz 24463bf0844fSGreg Kurz g_free(spapr->boot_device); 24473bf0844fSGreg Kurz spapr->boot_device = g_strdup(boot_device); 24485b2128d2SAlexander Graf } 24495b2128d2SAlexander Graf 2450ce2918cbSDavid Gibson static void spapr_create_lmb_dr_connectors(SpaprMachineState *spapr) 2451224245bfSDavid Gibson { 2452224245bfSDavid Gibson MachineState *machine = MACHINE(spapr); 2453224245bfSDavid Gibson uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE; 2454e8f986fcSBharata B Rao uint32_t nr_lmbs = (machine->maxram_size - machine->ram_size)/lmb_size; 2455224245bfSDavid Gibson int i; 2456224245bfSDavid Gibson 2457224245bfSDavid Gibson for (i = 0; i < nr_lmbs; i++) { 2458224245bfSDavid Gibson uint64_t addr; 2459224245bfSDavid Gibson 2460b0c14ec4SDavid Hildenbrand addr = i * lmb_size + machine->device_memory->base; 24616caf3ac6SDavid Gibson spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_LMB, 2462224245bfSDavid Gibson addr / lmb_size); 2463224245bfSDavid Gibson } 2464224245bfSDavid Gibson } 2465224245bfSDavid Gibson 2466224245bfSDavid Gibson /* 2467224245bfSDavid Gibson * If RAM size, maxmem size and individual node mem sizes aren't aligned 
2468224245bfSDavid Gibson * to SPAPR_MEMORY_BLOCK_SIZE(256MB), then refuse to start the guest 2469224245bfSDavid Gibson * since we can't support such unaligned sizes with DRCONF_MEMORY. 2470224245bfSDavid Gibson */ 24717c150d6fSDavid Gibson static void spapr_validate_node_memory(MachineState *machine, Error **errp) 2472224245bfSDavid Gibson { 2473224245bfSDavid Gibson int i; 2474224245bfSDavid Gibson 24757c150d6fSDavid Gibson if (machine->ram_size % SPAPR_MEMORY_BLOCK_SIZE) { 24767c150d6fSDavid Gibson error_setg(errp, "Memory size 0x" RAM_ADDR_FMT 2477ab3dd749SPhilippe Mathieu-Daudé " is not aligned to %" PRIu64 " MiB", 24787c150d6fSDavid Gibson machine->ram_size, 2479d23b6caaSPhilippe Mathieu-Daudé SPAPR_MEMORY_BLOCK_SIZE / MiB); 24807c150d6fSDavid Gibson return; 24817c150d6fSDavid Gibson } 24827c150d6fSDavid Gibson 24837c150d6fSDavid Gibson if (machine->maxram_size % SPAPR_MEMORY_BLOCK_SIZE) { 24847c150d6fSDavid Gibson error_setg(errp, "Maximum memory size 0x" RAM_ADDR_FMT 2485ab3dd749SPhilippe Mathieu-Daudé " is not aligned to %" PRIu64 " MiB", 24867c150d6fSDavid Gibson machine->ram_size, 2487d23b6caaSPhilippe Mathieu-Daudé SPAPR_MEMORY_BLOCK_SIZE / MiB); 24887c150d6fSDavid Gibson return; 2489224245bfSDavid Gibson } 2490224245bfSDavid Gibson 2491aa570207STao Xu for (i = 0; i < machine->numa_state->num_nodes; i++) { 24927e721e7bSTao Xu if (machine->numa_state->nodes[i].node_mem % SPAPR_MEMORY_BLOCK_SIZE) { 24937c150d6fSDavid Gibson error_setg(errp, 24947c150d6fSDavid Gibson "Node %d memory size 0x%" PRIx64 2495ab3dd749SPhilippe Mathieu-Daudé " is not aligned to %" PRIu64 " MiB", 24967e721e7bSTao Xu i, machine->numa_state->nodes[i].node_mem, 2497d23b6caaSPhilippe Mathieu-Daudé SPAPR_MEMORY_BLOCK_SIZE / MiB); 24987c150d6fSDavid Gibson return; 2499224245bfSDavid Gibson } 2500224245bfSDavid Gibson } 2501224245bfSDavid Gibson } 2502224245bfSDavid Gibson 2503535455fdSIgor Mammedov /* find cpu slot in machine->possible_cpus by core_id */ 2504535455fdSIgor Mammedov 
static CPUArchId *spapr_find_cpu_slot(MachineState *ms, uint32_t id, int *idx) 2505535455fdSIgor Mammedov { 2506fe6b6346SLike Xu int index = id / ms->smp.threads; 2507535455fdSIgor Mammedov 2508535455fdSIgor Mammedov if (index >= ms->possible_cpus->len) { 2509535455fdSIgor Mammedov return NULL; 2510535455fdSIgor Mammedov } 2511535455fdSIgor Mammedov if (idx) { 2512535455fdSIgor Mammedov *idx = index; 2513535455fdSIgor Mammedov } 2514535455fdSIgor Mammedov return &ms->possible_cpus->cpus[index]; 2515535455fdSIgor Mammedov } 2516535455fdSIgor Mammedov 2517ce2918cbSDavid Gibson static void spapr_set_vsmt_mode(SpaprMachineState *spapr, Error **errp) 2518fa98fbfcSSam Bobroff { 2519fe6b6346SLike Xu MachineState *ms = MACHINE(spapr); 252029cb4187SGreg Kurz SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); 2521fa98fbfcSSam Bobroff Error *local_err = NULL; 2522fa98fbfcSSam Bobroff bool vsmt_user = !!spapr->vsmt; 2523fa98fbfcSSam Bobroff int kvm_smt = kvmppc_smt_threads(); 2524fa98fbfcSSam Bobroff int ret; 2525fe6b6346SLike Xu unsigned int smp_threads = ms->smp.threads; 2526fa98fbfcSSam Bobroff 2527fa98fbfcSSam Bobroff if (!kvm_enabled() && (smp_threads > 1)) { 2528dcfe4805SMarkus Armbruster error_setg(errp, "TCG cannot support more than 1 thread/core " 2529fa98fbfcSSam Bobroff "on a pseries machine"); 2530dcfe4805SMarkus Armbruster return; 2531fa98fbfcSSam Bobroff } 2532fa98fbfcSSam Bobroff if (!is_power_of_2(smp_threads)) { 2533dcfe4805SMarkus Armbruster error_setg(errp, "Cannot support %d threads/core on a pseries " 2534fa98fbfcSSam Bobroff "machine because it must be a power of 2", smp_threads); 2535dcfe4805SMarkus Armbruster return; 2536fa98fbfcSSam Bobroff } 2537fa98fbfcSSam Bobroff 2538fa98fbfcSSam Bobroff /* Detemine the VSMT mode to use: */ 2539fa98fbfcSSam Bobroff if (vsmt_user) { 2540fa98fbfcSSam Bobroff if (spapr->vsmt < smp_threads) { 2541dcfe4805SMarkus Armbruster error_setg(errp, "Cannot support VSMT mode %d" 2542fa98fbfcSSam Bobroff " because it must 
be >= threads/core (%d)", 2543fa98fbfcSSam Bobroff spapr->vsmt, smp_threads); 2544dcfe4805SMarkus Armbruster return; 2545fa98fbfcSSam Bobroff } 2546fa98fbfcSSam Bobroff /* In this case, spapr->vsmt has been set by the command line */ 254729cb4187SGreg Kurz } else if (!smc->smp_threads_vsmt) { 25488904e5a7SDavid Gibson /* 25498904e5a7SDavid Gibson * Default VSMT value is tricky, because we need it to be as 25508904e5a7SDavid Gibson * consistent as possible (for migration), but this requires 25518904e5a7SDavid Gibson * changing it for at least some existing cases. We pick 8 as 25528904e5a7SDavid Gibson * the value that we'd get with KVM on POWER8, the 25538904e5a7SDavid Gibson * overwhelmingly common case in production systems. 25548904e5a7SDavid Gibson */ 25554ad64cbdSLaurent Vivier spapr->vsmt = MAX(8, smp_threads); 255629cb4187SGreg Kurz } else { 255729cb4187SGreg Kurz spapr->vsmt = smp_threads; 2558fa98fbfcSSam Bobroff } 2559fa98fbfcSSam Bobroff 2560fa98fbfcSSam Bobroff /* KVM: If necessary, set the SMT mode: */ 2561fa98fbfcSSam Bobroff if (kvm_enabled() && (spapr->vsmt != kvm_smt)) { 2562fa98fbfcSSam Bobroff ret = kvmppc_set_smt_threads(spapr->vsmt); 2563fa98fbfcSSam Bobroff if (ret) { 25641f20f2e0SDavid Gibson /* Looks like KVM isn't able to change VSMT mode */ 2565fa98fbfcSSam Bobroff error_setg(&local_err, 2566fa98fbfcSSam Bobroff "Failed to set KVM's VSMT mode to %d (errno %d)", 2567fa98fbfcSSam Bobroff spapr->vsmt, ret); 25681f20f2e0SDavid Gibson /* We can live with that if the default one is big enough 25691f20f2e0SDavid Gibson * for the number of threads, and a submultiple of the one 25701f20f2e0SDavid Gibson * we want. 
In this case we'll waste some vcpu ids, but 25711f20f2e0SDavid Gibson * behaviour will be correct */ 25721f20f2e0SDavid Gibson if ((kvm_smt >= smp_threads) && ((spapr->vsmt % kvm_smt) == 0)) { 25731f20f2e0SDavid Gibson warn_report_err(local_err); 25741f20f2e0SDavid Gibson } else { 2575fa98fbfcSSam Bobroff if (!vsmt_user) { 25761f20f2e0SDavid Gibson error_append_hint(&local_err, 25771f20f2e0SDavid Gibson "On PPC, a VM with %d threads/core" 25781f20f2e0SDavid Gibson " on a host with %d threads/core" 25791f20f2e0SDavid Gibson " requires the use of VSMT mode %d.\n", 2580fa98fbfcSSam Bobroff smp_threads, kvm_smt, spapr->vsmt); 2581fa98fbfcSSam Bobroff } 2582cdcca22aSVladimir Sementsov-Ogievskiy kvmppc_error_append_smt_possible_hint(&local_err); 2583dcfe4805SMarkus Armbruster error_propagate(errp, local_err); 2584fa98fbfcSSam Bobroff } 2585fa98fbfcSSam Bobroff } 25861f20f2e0SDavid Gibson } 2587fa98fbfcSSam Bobroff /* else TCG: nothing to do currently */ 2588fa98fbfcSSam Bobroff } 2589fa98fbfcSSam Bobroff 2590ce2918cbSDavid Gibson static void spapr_init_cpus(SpaprMachineState *spapr) 25911a5008fcSGreg Kurz { 25921a5008fcSGreg Kurz MachineState *machine = MACHINE(spapr); 25931a5008fcSGreg Kurz MachineClass *mc = MACHINE_GET_CLASS(machine); 2594ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine); 25951a5008fcSGreg Kurz const char *type = spapr_get_cpu_core_type(machine->cpu_type); 25961a5008fcSGreg Kurz const CPUArchIdList *possible_cpus; 2597fe6b6346SLike Xu unsigned int smp_cpus = machine->smp.cpus; 2598fe6b6346SLike Xu unsigned int smp_threads = machine->smp.threads; 2599fe6b6346SLike Xu unsigned int max_cpus = machine->smp.max_cpus; 26001a5008fcSGreg Kurz int boot_cores_nr = smp_cpus / smp_threads; 26011a5008fcSGreg Kurz int i; 26021a5008fcSGreg Kurz 26031a5008fcSGreg Kurz possible_cpus = mc->possible_cpu_arch_ids(machine); 26041a5008fcSGreg Kurz if (mc->has_hotpluggable_cpus) { 26051a5008fcSGreg Kurz if (smp_cpus % smp_threads) { 
26061a5008fcSGreg Kurz error_report("smp_cpus (%u) must be multiple of threads (%u)", 26071a5008fcSGreg Kurz smp_cpus, smp_threads); 26081a5008fcSGreg Kurz exit(1); 26091a5008fcSGreg Kurz } 26101a5008fcSGreg Kurz if (max_cpus % smp_threads) { 26111a5008fcSGreg Kurz error_report("max_cpus (%u) must be multiple of threads (%u)", 26121a5008fcSGreg Kurz max_cpus, smp_threads); 26131a5008fcSGreg Kurz exit(1); 26141a5008fcSGreg Kurz } 26151a5008fcSGreg Kurz } else { 26161a5008fcSGreg Kurz if (max_cpus != smp_cpus) { 26171a5008fcSGreg Kurz error_report("This machine version does not support CPU hotplug"); 26181a5008fcSGreg Kurz exit(1); 26191a5008fcSGreg Kurz } 26201a5008fcSGreg Kurz boot_cores_nr = possible_cpus->len; 26211a5008fcSGreg Kurz } 26221a5008fcSGreg Kurz 26231a5008fcSGreg Kurz if (smc->pre_2_10_has_unused_icps) { 26241a5008fcSGreg Kurz int i; 26251a5008fcSGreg Kurz 26261a518e76SCédric Le Goater for (i = 0; i < spapr_max_server_number(spapr); i++) { 26271a5008fcSGreg Kurz /* Dummy entries get deregistered when real ICPState objects 26281a5008fcSGreg Kurz * are registered during CPU core hotplug. 
26291a5008fcSGreg Kurz */ 26301a5008fcSGreg Kurz pre_2_10_vmstate_register_dummy_icp(i); 26311a5008fcSGreg Kurz } 26321a5008fcSGreg Kurz } 26331a5008fcSGreg Kurz 26341a5008fcSGreg Kurz for (i = 0; i < possible_cpus->len; i++) { 26351a5008fcSGreg Kurz int core_id = i * smp_threads; 26361a5008fcSGreg Kurz 26371a5008fcSGreg Kurz if (mc->has_hotpluggable_cpus) { 26381a5008fcSGreg Kurz spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_CPU, 26391a5008fcSGreg Kurz spapr_vcpu_id(spapr, core_id)); 26401a5008fcSGreg Kurz } 26411a5008fcSGreg Kurz 26421a5008fcSGreg Kurz if (i < boot_cores_nr) { 26431a5008fcSGreg Kurz Object *core = object_new(type); 26441a5008fcSGreg Kurz int nr_threads = smp_threads; 26451a5008fcSGreg Kurz 26461a5008fcSGreg Kurz /* Handle the partially filled core for older machine types */ 26471a5008fcSGreg Kurz if ((i + 1) * smp_threads >= smp_cpus) { 26481a5008fcSGreg Kurz nr_threads = smp_cpus - i * smp_threads; 26491a5008fcSGreg Kurz } 26501a5008fcSGreg Kurz 26515325cc34SMarkus Armbruster object_property_set_int(core, "nr-threads", nr_threads, 26521a5008fcSGreg Kurz &error_fatal); 26535325cc34SMarkus Armbruster object_property_set_int(core, CPU_CORE_PROP_CORE_ID, core_id, 26541a5008fcSGreg Kurz &error_fatal); 2655ce189ab2SMarkus Armbruster qdev_realize(DEVICE(core), NULL, &error_fatal); 2656ecda255eSSam Bobroff 2657ecda255eSSam Bobroff object_unref(core); 26581a5008fcSGreg Kurz } 26591a5008fcSGreg Kurz } 26601a5008fcSGreg Kurz } 26611a5008fcSGreg Kurz 2662999c9cafSGreg Kurz static PCIHostState *spapr_create_default_phb(void) 2663999c9cafSGreg Kurz { 2664999c9cafSGreg Kurz DeviceState *dev; 2665999c9cafSGreg Kurz 26663e80f690SMarkus Armbruster dev = qdev_new(TYPE_SPAPR_PCI_HOST_BRIDGE); 2667999c9cafSGreg Kurz qdev_prop_set_uint32(dev, "index", 0); 26683c6ef471SMarkus Armbruster sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); 2669999c9cafSGreg Kurz 2670999c9cafSGreg Kurz return PCI_HOST_BRIDGE(dev); 2671999c9cafSGreg Kurz } 
2672999c9cafSGreg Kurz 2673425f0b7aSDavid Gibson static hwaddr spapr_rma_size(SpaprMachineState *spapr, Error **errp) 2674425f0b7aSDavid Gibson { 2675425f0b7aSDavid Gibson MachineState *machine = MACHINE(spapr); 2676425f0b7aSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); 2677425f0b7aSDavid Gibson hwaddr rma_size = machine->ram_size; 2678425f0b7aSDavid Gibson hwaddr node0_size = spapr_node0_size(machine); 2679425f0b7aSDavid Gibson 2680425f0b7aSDavid Gibson /* RMA has to fit in the first NUMA node */ 2681425f0b7aSDavid Gibson rma_size = MIN(rma_size, node0_size); 2682425f0b7aSDavid Gibson 2683425f0b7aSDavid Gibson /* 2684425f0b7aSDavid Gibson * VRMA access is via a special 1TiB SLB mapping, so the RMA can 2685425f0b7aSDavid Gibson * never exceed that 2686425f0b7aSDavid Gibson */ 2687425f0b7aSDavid Gibson rma_size = MIN(rma_size, 1 * TiB); 2688425f0b7aSDavid Gibson 2689425f0b7aSDavid Gibson /* 2690425f0b7aSDavid Gibson * Clamp the RMA size based on machine type. This is for 2691425f0b7aSDavid Gibson * migration compatibility with older qemu versions, which limited 2692425f0b7aSDavid Gibson * the RMA size for complicated and mostly bad reasons. 
2693425f0b7aSDavid Gibson */ 2694425f0b7aSDavid Gibson if (smc->rma_limit) { 2695425f0b7aSDavid Gibson rma_size = MIN(rma_size, smc->rma_limit); 2696425f0b7aSDavid Gibson } 2697425f0b7aSDavid Gibson 2698425f0b7aSDavid Gibson if (rma_size < MIN_RMA_SLOF) { 2699425f0b7aSDavid Gibson error_setg(errp, 2700425f0b7aSDavid Gibson "pSeries SLOF firmware requires >= %" HWADDR_PRIx 2701425f0b7aSDavid Gibson "ldMiB guest RMA (Real Mode Area memory)", 2702425f0b7aSDavid Gibson MIN_RMA_SLOF / MiB); 2703425f0b7aSDavid Gibson return 0; 2704425f0b7aSDavid Gibson } 2705425f0b7aSDavid Gibson 2706425f0b7aSDavid Gibson return rma_size; 2707425f0b7aSDavid Gibson } 2708425f0b7aSDavid Gibson 2709ce316b51SGreg Kurz static void spapr_create_nvdimm_dr_connectors(SpaprMachineState *spapr) 2710ce316b51SGreg Kurz { 2711ce316b51SGreg Kurz MachineState *machine = MACHINE(spapr); 2712ce316b51SGreg Kurz int i; 2713ce316b51SGreg Kurz 2714ce316b51SGreg Kurz for (i = 0; i < machine->ram_slots; i++) { 2715ce316b51SGreg Kurz spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_PMEM, i); 2716ce316b51SGreg Kurz } 2717ce316b51SGreg Kurz } 2718ce316b51SGreg Kurz 271953018216SPaolo Bonzini /* pSeries LPAR / sPAPR hardware init */ 2720bcb5ce08SDavid Gibson static void spapr_machine_init(MachineState *machine) 272153018216SPaolo Bonzini { 2722ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(machine); 2723ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine); 2724ee3a71e3SShivaprasad G Bhat MachineClass *mc = MACHINE_GET_CLASS(machine); 2725fc8c745dSAlexey Kardashevskiy const char *bios_default = spapr->vof ? 
FW_FILE_NAME_VOF : FW_FILE_NAME; 2726fc8c745dSAlexey Kardashevskiy const char *bios_name = machine->firmware ?: bios_default; 27275f2b96b3SDaniel Henrique Barboza g_autofree char *filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); 27283ef96221SMarcel Apfelbaum const char *kernel_filename = machine->kernel_filename; 27293ef96221SMarcel Apfelbaum const char *initrd_filename = machine->initrd_filename; 273053018216SPaolo Bonzini PCIHostState *phb; 2731f73eb948SPaolo Bonzini bool has_vga; 273253018216SPaolo Bonzini int i; 273353018216SPaolo Bonzini MemoryRegion *sysmem = get_system_memory(); 2734b7d1f77aSBenjamin Herrenschmidt long load_limit, fw_size; 273530f4b05bSDavid Gibson Error *resize_hpt_err = NULL; 273653018216SPaolo Bonzini 27375f2b96b3SDaniel Henrique Barboza if (!filename) { 27385f2b96b3SDaniel Henrique Barboza error_report("Could not find LPAR firmware '%s'", bios_name); 27395f2b96b3SDaniel Henrique Barboza exit(1); 27405f2b96b3SDaniel Henrique Barboza } 27415f2b96b3SDaniel Henrique Barboza fw_size = load_image_targphys(filename, 0, FW_MAX_SIZE); 27425f2b96b3SDaniel Henrique Barboza if (fw_size <= 0) { 27435f2b96b3SDaniel Henrique Barboza error_report("Could not load LPAR firmware '%s'", filename); 27445f2b96b3SDaniel Henrique Barboza exit(1); 27455f2b96b3SDaniel Henrique Barboza } 27465f2b96b3SDaniel Henrique Barboza 27476c8ebe30SDavid Gibson /* 27486c8ebe30SDavid Gibson * if Secure VM (PEF) support is configured, then initialize it 27496c8ebe30SDavid Gibson */ 27506c8ebe30SDavid Gibson pef_kvm_init(machine->cgs, &error_fatal); 27516c8ebe30SDavid Gibson 2752226419d6SMichael S. 
Tsirkin msi_nonbroken = true; 275353018216SPaolo Bonzini 275453018216SPaolo Bonzini QLIST_INIT(&spapr->phbs); 27550cffce56SDavid Gibson QTAILQ_INIT(&spapr->pending_dimm_unplugs); 275653018216SPaolo Bonzini 27579f6edd06SDavid Gibson /* Determine capabilities to run with */ 27589f6edd06SDavid Gibson spapr_caps_init(spapr); 27599f6edd06SDavid Gibson 276030f4b05bSDavid Gibson kvmppc_check_papr_resize_hpt(&resize_hpt_err); 276130f4b05bSDavid Gibson if (spapr->resize_hpt == SPAPR_RESIZE_HPT_DEFAULT) { 276230f4b05bSDavid Gibson /* 276330f4b05bSDavid Gibson * If the user explicitly requested a mode we should either 276430f4b05bSDavid Gibson * supply it, or fail completely (which we do below). But if 276530f4b05bSDavid Gibson * it's not set explicitly, we reset our mode to something 276630f4b05bSDavid Gibson * that works 276730f4b05bSDavid Gibson */ 276830f4b05bSDavid Gibson if (resize_hpt_err) { 276930f4b05bSDavid Gibson spapr->resize_hpt = SPAPR_RESIZE_HPT_DISABLED; 277030f4b05bSDavid Gibson error_free(resize_hpt_err); 277130f4b05bSDavid Gibson resize_hpt_err = NULL; 277230f4b05bSDavid Gibson } else { 277330f4b05bSDavid Gibson spapr->resize_hpt = smc->resize_hpt_default; 277430f4b05bSDavid Gibson } 277530f4b05bSDavid Gibson } 277630f4b05bSDavid Gibson 277730f4b05bSDavid Gibson assert(spapr->resize_hpt != SPAPR_RESIZE_HPT_DEFAULT); 277830f4b05bSDavid Gibson 277930f4b05bSDavid Gibson if ((spapr->resize_hpt != SPAPR_RESIZE_HPT_DISABLED) && resize_hpt_err) { 278030f4b05bSDavid Gibson /* 278130f4b05bSDavid Gibson * User requested HPT resize, but this host can't supply it. 
Bail out 278230f4b05bSDavid Gibson */ 278330f4b05bSDavid Gibson error_report_err(resize_hpt_err); 278430f4b05bSDavid Gibson exit(1); 278530f4b05bSDavid Gibson } 278614963c34SMarkus Armbruster error_free(resize_hpt_err); 278730f4b05bSDavid Gibson 2788425f0b7aSDavid Gibson spapr->rma_size = spapr_rma_size(spapr, &error_fatal); 2789c4177479SAlexey Kardashevskiy 2790b7d1f77aSBenjamin Herrenschmidt /* Setup a load limit for the ramdisk leaving room for SLOF and FDT */ 27914b98e72dSAlexey Kardashevskiy load_limit = MIN(spapr->rma_size, FDT_MAX_ADDR) - FW_OVERHEAD; 279253018216SPaolo Bonzini 2793482969d6SCédric Le Goater /* 2794482969d6SCédric Le Goater * VSMT must be set in order to be able to compute VCPU ids, ie to 27951a518e76SCédric Le Goater * call spapr_max_server_number() or spapr_vcpu_id(). 2796482969d6SCédric Le Goater */ 2797482969d6SCédric Le Goater spapr_set_vsmt_mode(spapr, &error_fatal); 2798482969d6SCédric Le Goater 27997b565160SDavid Gibson /* Set up Interrupt Controller before we create the VCPUs */ 2800fab397d8SCédric Le Goater spapr_irq_init(spapr, &error_fatal); 28017b565160SDavid Gibson 2802dc1b5eeeSGreg Kurz /* Set up containers for ibm,client-architecture-support negotiated options 2803dc1b5eeeSGreg Kurz */ 2804facdb8b6SMichael Roth spapr->ov5 = spapr_ovec_new(); 2805facdb8b6SMichael Roth spapr->ov5_cas = spapr_ovec_new(); 2806facdb8b6SMichael Roth 2807224245bfSDavid Gibson if (smc->dr_lmb_enabled) { 2808facdb8b6SMichael Roth spapr_ovec_set(spapr->ov5, OV5_DRCONF_MEMORY); 28097c150d6fSDavid Gibson spapr_validate_node_memory(machine, &error_fatal); 2810224245bfSDavid Gibson } 2811224245bfSDavid Gibson 2812417ece33SMichael Roth spapr_ovec_set(spapr->ov5, OV5_FORM1_AFFINITY); 2813417ece33SMichael Roth 2814e0eb84d4SDaniel Henrique Barboza /* Do not advertise FORM2 NUMA support for pseries-6.1 and older */ 2815e0eb84d4SDaniel Henrique Barboza if (!smc->pre_6_2_numa_affinity) { 2816e0eb84d4SDaniel Henrique Barboza spapr_ovec_set(spapr->ov5, 
OV5_FORM2_AFFINITY); 2817e0eb84d4SDaniel Henrique Barboza } 2818e0eb84d4SDaniel Henrique Barboza 2819ffbb1705SMichael Roth /* advertise support for dedicated HP event source to guests */ 2820ffbb1705SMichael Roth if (spapr->use_hotplug_event_source) { 2821ffbb1705SMichael Roth spapr_ovec_set(spapr->ov5, OV5_HP_EVT); 2822ffbb1705SMichael Roth } 2823ffbb1705SMichael Roth 28242772cf6bSDavid Gibson /* advertise support for HPT resizing */ 28252772cf6bSDavid Gibson if (spapr->resize_hpt != SPAPR_RESIZE_HPT_DISABLED) { 28262772cf6bSDavid Gibson spapr_ovec_set(spapr->ov5, OV5_HPT_RESIZE); 28272772cf6bSDavid Gibson } 28282772cf6bSDavid Gibson 2829a324d6f1SBharata B Rao /* advertise support for ibm,dyamic-memory-v2 */ 2830a324d6f1SBharata B Rao spapr_ovec_set(spapr->ov5, OV5_DRMEM_V2); 2831a324d6f1SBharata B Rao 2832db592b5bSCédric Le Goater /* advertise XIVE on POWER9 machines */ 2833ca62823bSDavid Gibson if (spapr->irq->xive) { 2834db592b5bSCédric Le Goater spapr_ovec_set(spapr->ov5, OV5_XIVE_EXPLOIT); 2835db592b5bSCédric Le Goater } 2836db592b5bSCédric Le Goater 283753018216SPaolo Bonzini /* init CPUs */ 28380c86d0fdSDavid Gibson spapr_init_cpus(spapr); 283953018216SPaolo Bonzini 284066407069SDaniel Henrique Barboza spapr->gpu_numa_id = spapr_numa_initial_nvgpu_numa_id(machine); 2841db5127b2SDavid Gibson 2842f1aa45ffSDaniel Henrique Barboza /* Init numa_assoc_array */ 2843f1aa45ffSDaniel Henrique Barboza spapr_numa_associativity_init(spapr, machine); 2844f1aa45ffSDaniel Henrique Barboza 28450550b120SGreg Kurz if ((!kvm_enabled() || kvmppc_has_cap_mmu_radix()) && 2846ad99d04cSDavid Gibson ppc_type_check_compat(machine->cpu_type, CPU_POWERPC_LOGICAL_3_00, 0, 28470550b120SGreg Kurz spapr->max_compat_pvr)) { 2848b4b83312SGreg Kurz spapr_ovec_set(spapr->ov5, OV5_MMU_RADIX_300); 28490550b120SGreg Kurz /* KVM and TCG always allow GTSE with radix... */ 28500550b120SGreg Kurz spapr_ovec_set(spapr->ov5, OV5_MMU_RADIX_GTSE); 28510550b120SGreg Kurz } 28520550b120SGreg Kurz /* ... 
but not with hash (currently). */ 28530550b120SGreg Kurz 2854026bfd89SDavid Gibson if (kvm_enabled()) { 2855026bfd89SDavid Gibson /* Enable H_LOGICAL_CI_* so SLOF can talk to in-kernel devices */ 2856026bfd89SDavid Gibson kvmppc_enable_logical_ci_hcalls(); 2857ef9971ddSAlexey Kardashevskiy kvmppc_enable_set_mode_hcall(); 28585145ad4fSNathan Whitehorn 28595145ad4fSNathan Whitehorn /* H_CLEAR_MOD/_REF are mandatory in PAPR, but off by default */ 28605145ad4fSNathan Whitehorn kvmppc_enable_clear_ref_mod_hcalls(); 286168f9f708SSuraj Jitindar Singh 286268f9f708SSuraj Jitindar Singh /* Enable H_PAGE_INIT */ 286368f9f708SSuraj Jitindar Singh kvmppc_enable_h_page_init(); 2864026bfd89SDavid Gibson } 2865026bfd89SDavid Gibson 2866ab74e543SIgor Mammedov /* map RAM */ 2867ab74e543SIgor Mammedov memory_region_add_subregion(sysmem, 0, machine->ram); 286853018216SPaolo Bonzini 2869b0c14ec4SDavid Hildenbrand /* always allocate the device memory information */ 2870b0c14ec4SDavid Hildenbrand machine->device_memory = g_malloc0(sizeof(*machine->device_memory)); 2871b0c14ec4SDavid Hildenbrand 28724a1c9cf0SBharata B Rao /* initialize hotplug memory address space */ 28734a1c9cf0SBharata B Rao if (machine->ram_size < machine->maxram_size) { 28740c9269a5SDavid Hildenbrand ram_addr_t device_mem_size = machine->maxram_size - machine->ram_size; 287571c9a3ddSBharata B Rao /* 287671c9a3ddSBharata B Rao * Limit the number of hotpluggable memory slots to half the number 287771c9a3ddSBharata B Rao * slots that KVM supports, leaving the other half for PCI and other 287871c9a3ddSBharata B Rao * devices. However ensure that number of slots doesn't drop below 32. 287971c9a3ddSBharata B Rao */ 288071c9a3ddSBharata B Rao int max_memslots = kvm_enabled() ? 
kvm_get_max_memslots() / 2 : 288171c9a3ddSBharata B Rao SPAPR_MAX_RAM_SLOTS; 28824a1c9cf0SBharata B Rao 288371c9a3ddSBharata B Rao if (max_memslots < SPAPR_MAX_RAM_SLOTS) { 288471c9a3ddSBharata B Rao max_memslots = SPAPR_MAX_RAM_SLOTS; 288571c9a3ddSBharata B Rao } 288671c9a3ddSBharata B Rao if (machine->ram_slots > max_memslots) { 2887d54e4d76SDavid Gibson error_report("Specified number of memory slots %" 2888d54e4d76SDavid Gibson PRIu64" exceeds max supported %d", 288971c9a3ddSBharata B Rao machine->ram_slots, max_memslots); 2890d54e4d76SDavid Gibson exit(1); 28914a1c9cf0SBharata B Rao } 28924a1c9cf0SBharata B Rao 2893b0c14ec4SDavid Hildenbrand machine->device_memory->base = ROUND_UP(machine->ram_size, 28940c9269a5SDavid Hildenbrand SPAPR_DEVICE_MEM_ALIGN); 2895b0c14ec4SDavid Hildenbrand memory_region_init(&machine->device_memory->mr, OBJECT(spapr), 28960c9269a5SDavid Hildenbrand "device-memory", device_mem_size); 2897b0c14ec4SDavid Hildenbrand memory_region_add_subregion(sysmem, machine->device_memory->base, 2898b0c14ec4SDavid Hildenbrand &machine->device_memory->mr); 28994a1c9cf0SBharata B Rao } 29004a1c9cf0SBharata B Rao 2901224245bfSDavid Gibson if (smc->dr_lmb_enabled) { 2902224245bfSDavid Gibson spapr_create_lmb_dr_connectors(spapr); 2903224245bfSDavid Gibson } 2904224245bfSDavid Gibson 29058af7e1feSNicholas Piggin if (spapr_get_cap(spapr, SPAPR_CAP_FWNMI) == SPAPR_CAP_ON) { 29062500fb42SAravinda Prasad /* Create the error string for live migration blocker */ 29072500fb42SAravinda Prasad error_setg(&spapr->fwnmi_migration_blocker, 29082500fb42SAravinda Prasad "A machine check is being handled during migration. 
The handler" 29092500fb42SAravinda Prasad "may run and log hardware error on the destination"); 29102500fb42SAravinda Prasad } 29112500fb42SAravinda Prasad 2912ee3a71e3SShivaprasad G Bhat if (mc->nvdimm_supported) { 2913ee3a71e3SShivaprasad G Bhat spapr_create_nvdimm_dr_connectors(spapr); 2914ee3a71e3SShivaprasad G Bhat } 2915ee3a71e3SShivaprasad G Bhat 2916ffbb1705SMichael Roth /* Set up RTAS event infrastructure */ 291753018216SPaolo Bonzini spapr_events_init(spapr); 291853018216SPaolo Bonzini 291912f42174SDavid Gibson /* Set up the RTC RTAS interfaces */ 292028df36a1SDavid Gibson spapr_rtc_create(spapr); 292112f42174SDavid Gibson 292253018216SPaolo Bonzini /* Set up VIO bus */ 292353018216SPaolo Bonzini spapr->vio_bus = spapr_vio_bus_init(); 292453018216SPaolo Bonzini 292546ee119fSPaolo Bonzini for (i = 0; serial_hd(i); i++) { 29269bca0edbSPeter Maydell spapr_vty_create(spapr->vio_bus, serial_hd(i)); 292753018216SPaolo Bonzini } 292853018216SPaolo Bonzini 292953018216SPaolo Bonzini /* We always have at least the nvram device on VIO */ 293053018216SPaolo Bonzini spapr_create_nvram(spapr); 293153018216SPaolo Bonzini 2932962b6c36SMichael Roth /* 2933962b6c36SMichael Roth * Setup hotplug / dynamic-reconfiguration connectors. top-level 2934962b6c36SMichael Roth * connectors (described in root DT node's "ibm,drc-types" property) 2935962b6c36SMichael Roth * are pre-initialized here. additional child connectors (such as 2936962b6c36SMichael Roth * connectors for a PHBs PCI slots) are added as needed during their 2937962b6c36SMichael Roth * parent's realization. 
2938962b6c36SMichael Roth */ 2939962b6c36SMichael Roth if (smc->dr_phb_enabled) { 2940962b6c36SMichael Roth for (i = 0; i < SPAPR_MAX_PHBS; i++) { 2941962b6c36SMichael Roth spapr_dr_connector_new(OBJECT(machine), TYPE_SPAPR_DRC_PHB, i); 2942962b6c36SMichael Roth } 2943962b6c36SMichael Roth } 2944962b6c36SMichael Roth 294553018216SPaolo Bonzini /* Set up PCI */ 294653018216SPaolo Bonzini spapr_pci_rtas_init(); 294753018216SPaolo Bonzini 2948999c9cafSGreg Kurz phb = spapr_create_default_phb(); 294953018216SPaolo Bonzini 295053018216SPaolo Bonzini for (i = 0; i < nb_nics; i++) { 295153018216SPaolo Bonzini NICInfo *nd = &nd_table[i]; 295253018216SPaolo Bonzini 295353018216SPaolo Bonzini if (!nd->model) { 29543c3a4e7aSThomas Huth nd->model = g_strdup("spapr-vlan"); 295553018216SPaolo Bonzini } 295653018216SPaolo Bonzini 29573c3a4e7aSThomas Huth if (g_str_equal(nd->model, "spapr-vlan") || 29583c3a4e7aSThomas Huth g_str_equal(nd->model, "ibmveth")) { 295953018216SPaolo Bonzini spapr_vlan_create(spapr->vio_bus, nd); 296053018216SPaolo Bonzini } else { 296129b358f9SDavid Gibson pci_nic_init_nofail(&nd_table[i], phb->bus, nd->model, NULL); 296253018216SPaolo Bonzini } 296353018216SPaolo Bonzini } 296453018216SPaolo Bonzini 296553018216SPaolo Bonzini for (i = 0; i <= drive_get_max_bus(IF_SCSI); i++) { 296653018216SPaolo Bonzini spapr_vscsi_create(spapr->vio_bus); 296753018216SPaolo Bonzini } 296853018216SPaolo Bonzini 296953018216SPaolo Bonzini /* Graphics */ 2970f73eb948SPaolo Bonzini has_vga = spapr_vga_init(phb->bus, &error_fatal); 2971f73eb948SPaolo Bonzini if (has_vga) { 2972f73eb948SPaolo Bonzini spapr->want_stdout_path = !machine->enable_graphics; 2973c6e76503SPaolo Bonzini machine->usb |= defaults_enabled() && !machine->usb_disabled; 2974f73eb948SPaolo Bonzini } else { 2975f73eb948SPaolo Bonzini spapr->want_stdout_path = true; 297653018216SPaolo Bonzini } 297753018216SPaolo Bonzini 29784ee9ced9SMarcel Apfelbaum if (machine->usb) { 297957040d45SThomas Huth if 
(smc->use_ohci_by_default) { 298053018216SPaolo Bonzini pci_create_simple(phb->bus, -1, "pci-ohci"); 298157040d45SThomas Huth } else { 298257040d45SThomas Huth pci_create_simple(phb->bus, -1, "nec-usb-xhci"); 298357040d45SThomas Huth } 2984c86580b8SMarkus Armbruster 2985f73eb948SPaolo Bonzini if (has_vga) { 2986c86580b8SMarkus Armbruster USBBus *usb_bus = usb_bus_find(-1); 2987c86580b8SMarkus Armbruster 2988c86580b8SMarkus Armbruster usb_create_simple(usb_bus, "usb-kbd"); 2989c86580b8SMarkus Armbruster usb_create_simple(usb_bus, "usb-mouse"); 299053018216SPaolo Bonzini } 299153018216SPaolo Bonzini } 299253018216SPaolo Bonzini 299353018216SPaolo Bonzini if (kernel_filename) { 29945bb55f3eSAlexey Kardashevskiy uint64_t loaded_addr = 0; 29955bb55f3eSAlexey Kardashevskiy 29964366e1dbSLiam Merwick spapr->kernel_size = load_elf(kernel_filename, NULL, 299787262806SAlexey Kardashevskiy translate_kernel_address, spapr, 29985bb55f3eSAlexey Kardashevskiy NULL, &loaded_addr, NULL, NULL, 1, 2999a19f7fb0SDavid Gibson PPC_ELF_MACHINE, 0, 0); 3000a19f7fb0SDavid Gibson if (spapr->kernel_size == ELF_LOAD_WRONG_ENDIAN) { 30014366e1dbSLiam Merwick spapr->kernel_size = load_elf(kernel_filename, NULL, 3002617160c9SBALATON Zoltan translate_kernel_address, spapr, 30035bb55f3eSAlexey Kardashevskiy NULL, &loaded_addr, NULL, NULL, 0, 3004617160c9SBALATON Zoltan PPC_ELF_MACHINE, 0, 0); 3005a19f7fb0SDavid Gibson spapr->kernel_le = spapr->kernel_size > 0; 300616457e7fSBenjamin Herrenschmidt } 3007a19f7fb0SDavid Gibson if (spapr->kernel_size < 0) { 3008a19f7fb0SDavid Gibson error_report("error loading %s: %s", kernel_filename, 3009a19f7fb0SDavid Gibson load_elf_strerror(spapr->kernel_size)); 301053018216SPaolo Bonzini exit(1); 301153018216SPaolo Bonzini } 301253018216SPaolo Bonzini 30135bb55f3eSAlexey Kardashevskiy if (spapr->kernel_addr != loaded_addr) { 30145bb55f3eSAlexey Kardashevskiy warn_report("spapr: kernel_addr changed from 0x%"PRIx64 30155bb55f3eSAlexey Kardashevskiy " to 0x%"PRIx64, 
30165bb55f3eSAlexey Kardashevskiy spapr->kernel_addr, loaded_addr); 30175bb55f3eSAlexey Kardashevskiy spapr->kernel_addr = loaded_addr; 30185bb55f3eSAlexey Kardashevskiy } 30195bb55f3eSAlexey Kardashevskiy 302053018216SPaolo Bonzini /* load initrd */ 302153018216SPaolo Bonzini if (initrd_filename) { 302253018216SPaolo Bonzini /* Try to locate the initrd in the gap between the kernel 302353018216SPaolo Bonzini * and the firmware. Add a bit of space just in case 302453018216SPaolo Bonzini */ 302587262806SAlexey Kardashevskiy spapr->initrd_base = (spapr->kernel_addr + spapr->kernel_size 3026a19f7fb0SDavid Gibson + 0x1ffff) & ~0xffff; 3027a19f7fb0SDavid Gibson spapr->initrd_size = load_image_targphys(initrd_filename, 3028a19f7fb0SDavid Gibson spapr->initrd_base, 3029a19f7fb0SDavid Gibson load_limit 3030a19f7fb0SDavid Gibson - spapr->initrd_base); 3031a19f7fb0SDavid Gibson if (spapr->initrd_size < 0) { 3032d54e4d76SDavid Gibson error_report("could not load initial ram disk '%s'", 303353018216SPaolo Bonzini initrd_filename); 303453018216SPaolo Bonzini exit(1); 303553018216SPaolo Bonzini } 303653018216SPaolo Bonzini } 303753018216SPaolo Bonzini } 303853018216SPaolo Bonzini 303928e02042SDavid Gibson /* FIXME: Should register things through the MachineState's qdev 304028e02042SDavid Gibson * interface, this is a legacy from the sPAPREnvironment structure 304128e02042SDavid Gibson * which predated MachineState but had a similar function */ 30424be21d56SDavid Gibson vmstate_register(NULL, 0, &vmstate_spapr, spapr); 30431df2c9a2SPeter Xu register_savevm_live("spapr/htab", VMSTATE_INSTANCE_ID_ANY, 1, 30444be21d56SDavid Gibson &savevm_htab_handlers, spapr); 30454be21d56SDavid Gibson 30469bc6bfdfSMarkus Armbruster qbus_set_hotplug_handler(sysbus_get_default(), OBJECT(machine)); 3047bb2bdd81SGreg Kurz 30485b2128d2SAlexander Graf qemu_register_boot_set(spapr_boot_set, spapr); 304942043e4fSLaurent Vivier 305093eac7b8SNicholas Piggin /* 305193eac7b8SNicholas Piggin * Nothing needs to 
be done to resume a suspended guest because 305293eac7b8SNicholas Piggin * suspending does not change the machine state, so no need for 305393eac7b8SNicholas Piggin * a ->wakeup method. 305493eac7b8SNicholas Piggin */ 305593eac7b8SNicholas Piggin qemu_register_wakeup_support(); 305693eac7b8SNicholas Piggin 305742043e4fSLaurent Vivier if (kvm_enabled()) { 30583dc410aeSAlexey Kardashevskiy /* to stop and start vmclock */ 305942043e4fSLaurent Vivier qemu_add_vm_change_state_handler(cpu_ppc_clock_vm_state_change, 306042043e4fSLaurent Vivier &spapr->tb); 30613dc410aeSAlexey Kardashevskiy 30623dc410aeSAlexey Kardashevskiy kvmppc_spapr_enable_inkernel_multitce(); 306342043e4fSLaurent Vivier } 30649ac703acSAravinda Prasad 30658af7e1feSNicholas Piggin qemu_cond_init(&spapr->fwnmi_machine_check_interlock_cond); 3066fc8c745dSAlexey Kardashevskiy if (spapr->vof) { 3067fc8c745dSAlexey Kardashevskiy spapr->vof->fw_size = fw_size; /* for claim() on itself */ 3068fc8c745dSAlexey Kardashevskiy spapr_register_hypercall(KVMPPC_H_VOF_CLIENT, spapr_h_vof_client); 3069fc8c745dSAlexey Kardashevskiy } 307081b205ceSAlexey Kardashevskiy 307181b205ceSAlexey Kardashevskiy spapr_watchdog_init(spapr); 307253018216SPaolo Bonzini } 307353018216SPaolo Bonzini 307407b10bc4SDaniel Henrique Barboza #define DEFAULT_KVM_TYPE "auto" 3075dc0ca80eSEric Auger static int spapr_kvm_type(MachineState *machine, const char *vm_type) 3076135a129aSAneesh Kumar K.V { 307707b10bc4SDaniel Henrique Barboza /* 307807b10bc4SDaniel Henrique Barboza * The use of g_ascii_strcasecmp() for 'hv' and 'pr' is to 307907b10bc4SDaniel Henrique Barboza * accomodate the 'HV' and 'PV' formats that exists in the 308007b10bc4SDaniel Henrique Barboza * wild. The 'auto' mode is being introduced already as 308107b10bc4SDaniel Henrique Barboza * lower-case, thus we don't need to bother checking for 308207b10bc4SDaniel Henrique Barboza * "AUTO". 
308307b10bc4SDaniel Henrique Barboza */ 308407b10bc4SDaniel Henrique Barboza if (!vm_type || !strcmp(vm_type, DEFAULT_KVM_TYPE)) { 3085135a129aSAneesh Kumar K.V return 0; 3086135a129aSAneesh Kumar K.V } 3087135a129aSAneesh Kumar K.V 308807b10bc4SDaniel Henrique Barboza if (!g_ascii_strcasecmp(vm_type, "hv")) { 3089135a129aSAneesh Kumar K.V return 1; 3090135a129aSAneesh Kumar K.V } 3091135a129aSAneesh Kumar K.V 309207b10bc4SDaniel Henrique Barboza if (!g_ascii_strcasecmp(vm_type, "pr")) { 3093135a129aSAneesh Kumar K.V return 2; 3094135a129aSAneesh Kumar K.V } 3095135a129aSAneesh Kumar K.V 3096135a129aSAneesh Kumar K.V error_report("Unknown kvm-type specified '%s'", vm_type); 3097135a129aSAneesh Kumar K.V exit(1); 3098135a129aSAneesh Kumar K.V } 3099135a129aSAneesh Kumar K.V 310071461b0fSAlexey Kardashevskiy /* 3101627b84f4SGonglei * Implementation of an interface to adjust firmware path 310271461b0fSAlexey Kardashevskiy * for the bootindex property handling. 310371461b0fSAlexey Kardashevskiy */ 310471461b0fSAlexey Kardashevskiy static char *spapr_get_fw_dev_path(FWPathProvider *p, BusState *bus, 310571461b0fSAlexey Kardashevskiy DeviceState *dev) 310671461b0fSAlexey Kardashevskiy { 310771461b0fSAlexey Kardashevskiy #define CAST(type, obj, name) \ 310871461b0fSAlexey Kardashevskiy ((type *)object_dynamic_cast(OBJECT(obj), (name))) 310971461b0fSAlexey Kardashevskiy SCSIDevice *d = CAST(SCSIDevice, dev, TYPE_SCSI_DEVICE); 3110ce2918cbSDavid Gibson SpaprPhbState *phb = CAST(SpaprPhbState, dev, TYPE_SPAPR_PCI_HOST_BRIDGE); 3111c4e13492SFelipe Franciosi VHostSCSICommon *vsc = CAST(VHostSCSICommon, dev, TYPE_VHOST_SCSI_COMMON); 3112040bdafcSGreg Kurz PCIDevice *pcidev = CAST(PCIDevice, dev, TYPE_PCI_DEVICE); 311371461b0fSAlexey Kardashevskiy 31141977434bSDaniel Henrique Barboza if (d && bus) { 311571461b0fSAlexey Kardashevskiy void *spapr = CAST(void, bus->parent, "spapr-vscsi"); 311671461b0fSAlexey Kardashevskiy VirtIOSCSI *virtio = CAST(VirtIOSCSI, bus->parent, 
TYPE_VIRTIO_SCSI); 311771461b0fSAlexey Kardashevskiy USBDevice *usb = CAST(USBDevice, bus->parent, TYPE_USB_DEVICE); 311871461b0fSAlexey Kardashevskiy 311971461b0fSAlexey Kardashevskiy if (spapr) { 312071461b0fSAlexey Kardashevskiy /* 312171461b0fSAlexey Kardashevskiy * Replace "channel@0/disk@0,0" with "disk@8000000000000000": 31221ac24c91SThomas Huth * In the top 16 bits of the 64-bit LUN, we use SRP luns of the form 31231ac24c91SThomas Huth * 0x8000 | (target << 8) | (bus << 5) | lun 31241ac24c91SThomas Huth * (see the "Logical unit addressing format" table in SAM5) 312571461b0fSAlexey Kardashevskiy */ 31261ac24c91SThomas Huth unsigned id = 0x8000 | (d->id << 8) | (d->channel << 5) | d->lun; 312771461b0fSAlexey Kardashevskiy return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev), 312871461b0fSAlexey Kardashevskiy (uint64_t)id << 48); 312971461b0fSAlexey Kardashevskiy } else if (virtio) { 313071461b0fSAlexey Kardashevskiy /* 313171461b0fSAlexey Kardashevskiy * We use SRP luns of the form 01000000 | (target << 8) | lun 313271461b0fSAlexey Kardashevskiy * in the top 32 bits of the 64-bit LUN 313371461b0fSAlexey Kardashevskiy * Note: the quote above is from SLOF and it is wrong, 313471461b0fSAlexey Kardashevskiy * the actual binding is: 313571461b0fSAlexey Kardashevskiy * swap 0100 or 10 << or 20 << ( target lun-id -- srplun ) 313671461b0fSAlexey Kardashevskiy */ 313771461b0fSAlexey Kardashevskiy unsigned id = 0x1000000 | (d->id << 16) | d->lun; 3138bac658d1SThomas Huth if (d->lun >= 256) { 3139bac658d1SThomas Huth /* Use the LUN "flat space addressing method" */ 3140bac658d1SThomas Huth id |= 0x4000; 3141bac658d1SThomas Huth } 314271461b0fSAlexey Kardashevskiy return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev), 314371461b0fSAlexey Kardashevskiy (uint64_t)id << 32); 314471461b0fSAlexey Kardashevskiy } else if (usb) { 314571461b0fSAlexey Kardashevskiy /* 314671461b0fSAlexey Kardashevskiy * We use SRP luns of the form 01000000 | (usb-port << 16) | lun 
314771461b0fSAlexey Kardashevskiy * in the top 32 bits of the 64-bit LUN 314871461b0fSAlexey Kardashevskiy */ 314971461b0fSAlexey Kardashevskiy unsigned usb_port = atoi(usb->port->path); 315071461b0fSAlexey Kardashevskiy unsigned id = 0x1000000 | (usb_port << 16) | d->lun; 315171461b0fSAlexey Kardashevskiy return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev), 315271461b0fSAlexey Kardashevskiy (uint64_t)id << 32); 315371461b0fSAlexey Kardashevskiy } 315471461b0fSAlexey Kardashevskiy } 315571461b0fSAlexey Kardashevskiy 3156b99260ebSThomas Huth /* 3157b99260ebSThomas Huth * SLOF probes the USB devices, and if it recognizes that the device is a 3158b99260ebSThomas Huth * storage device, it changes its name to "storage" instead of "usb-host", 3159b99260ebSThomas Huth * and additionally adds a child node for the SCSI LUN, so the correct 3160b99260ebSThomas Huth * boot path in SLOF is something like .../storage@1/disk@xxx" instead. 3161b99260ebSThomas Huth */ 3162b99260ebSThomas Huth if (strcmp("usb-host", qdev_fw_name(dev)) == 0) { 3163b99260ebSThomas Huth USBDevice *usbdev = CAST(USBDevice, dev, TYPE_USB_DEVICE); 3164b7b2a60bSGerd Hoffmann if (usb_device_is_scsi_storage(usbdev)) { 3165b99260ebSThomas Huth return g_strdup_printf("storage@%s/disk", usbdev->port->path); 3166b99260ebSThomas Huth } 3167b99260ebSThomas Huth } 3168b99260ebSThomas Huth 316971461b0fSAlexey Kardashevskiy if (phb) { 317071461b0fSAlexey Kardashevskiy /* Replace "pci" with "pci@800000020000000" */ 317171461b0fSAlexey Kardashevskiy return g_strdup_printf("pci@%"PRIX64, phb->buid); 317271461b0fSAlexey Kardashevskiy } 317371461b0fSAlexey Kardashevskiy 3174c4e13492SFelipe Franciosi if (vsc) { 3175c4e13492SFelipe Franciosi /* Same logic as virtio above */ 3176c4e13492SFelipe Franciosi unsigned id = 0x1000000 | (vsc->target << 16) | vsc->lun; 3177c4e13492SFelipe Franciosi return g_strdup_printf("disk@%"PRIX64, (uint64_t)id << 32); 3178c4e13492SFelipe Franciosi } 3179c4e13492SFelipe Franciosi 
31804871dd4cSThomas Huth if (g_str_equal("pci-bridge", qdev_fw_name(dev))) { 31814871dd4cSThomas Huth /* SLOF uses "pci" instead of "pci-bridge" for PCI bridges */ 31824871dd4cSThomas Huth PCIDevice *pcidev = CAST(PCIDevice, dev, TYPE_PCI_DEVICE); 31834871dd4cSThomas Huth return g_strdup_printf("pci@%x", PCI_SLOT(pcidev->devfn)); 31844871dd4cSThomas Huth } 31854871dd4cSThomas Huth 3186040bdafcSGreg Kurz if (pcidev) { 3187040bdafcSGreg Kurz return spapr_pci_fw_dev_name(pcidev); 3188040bdafcSGreg Kurz } 3189040bdafcSGreg Kurz 319071461b0fSAlexey Kardashevskiy return NULL; 319171461b0fSAlexey Kardashevskiy } 319271461b0fSAlexey Kardashevskiy 319323825581SEduardo Habkost static char *spapr_get_kvm_type(Object *obj, Error **errp) 319423825581SEduardo Habkost { 3195ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(obj); 319623825581SEduardo Habkost 319728e02042SDavid Gibson return g_strdup(spapr->kvm_type); 319823825581SEduardo Habkost } 319923825581SEduardo Habkost 320023825581SEduardo Habkost static void spapr_set_kvm_type(Object *obj, const char *value, Error **errp) 320123825581SEduardo Habkost { 3202ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(obj); 320323825581SEduardo Habkost 320428e02042SDavid Gibson g_free(spapr->kvm_type); 320528e02042SDavid Gibson spapr->kvm_type = g_strdup(value); 320623825581SEduardo Habkost } 320723825581SEduardo Habkost 3208f6229214SMichael Roth static bool spapr_get_modern_hotplug_events(Object *obj, Error **errp) 3209f6229214SMichael Roth { 3210ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(obj); 3211f6229214SMichael Roth 3212f6229214SMichael Roth return spapr->use_hotplug_event_source; 3213f6229214SMichael Roth } 3214f6229214SMichael Roth 3215f6229214SMichael Roth static void spapr_set_modern_hotplug_events(Object *obj, bool value, 3216f6229214SMichael Roth Error **errp) 3217f6229214SMichael Roth { 3218ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(obj); 3219f6229214SMichael 
Roth 3220f6229214SMichael Roth spapr->use_hotplug_event_source = value; 3221f6229214SMichael Roth } 3222f6229214SMichael Roth 3223fcad0d21SAlexey Kardashevskiy static bool spapr_get_msix_emulation(Object *obj, Error **errp) 3224fcad0d21SAlexey Kardashevskiy { 3225fcad0d21SAlexey Kardashevskiy return true; 3226fcad0d21SAlexey Kardashevskiy } 3227fcad0d21SAlexey Kardashevskiy 322830f4b05bSDavid Gibson static char *spapr_get_resize_hpt(Object *obj, Error **errp) 322930f4b05bSDavid Gibson { 3230ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(obj); 323130f4b05bSDavid Gibson 323230f4b05bSDavid Gibson switch (spapr->resize_hpt) { 323330f4b05bSDavid Gibson case SPAPR_RESIZE_HPT_DEFAULT: 323430f4b05bSDavid Gibson return g_strdup("default"); 323530f4b05bSDavid Gibson case SPAPR_RESIZE_HPT_DISABLED: 323630f4b05bSDavid Gibson return g_strdup("disabled"); 323730f4b05bSDavid Gibson case SPAPR_RESIZE_HPT_ENABLED: 323830f4b05bSDavid Gibson return g_strdup("enabled"); 323930f4b05bSDavid Gibson case SPAPR_RESIZE_HPT_REQUIRED: 324030f4b05bSDavid Gibson return g_strdup("required"); 324130f4b05bSDavid Gibson } 324230f4b05bSDavid Gibson g_assert_not_reached(); 324330f4b05bSDavid Gibson } 324430f4b05bSDavid Gibson 324530f4b05bSDavid Gibson static void spapr_set_resize_hpt(Object *obj, const char *value, Error **errp) 324630f4b05bSDavid Gibson { 3247ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(obj); 324830f4b05bSDavid Gibson 324930f4b05bSDavid Gibson if (strcmp(value, "default") == 0) { 325030f4b05bSDavid Gibson spapr->resize_hpt = SPAPR_RESIZE_HPT_DEFAULT; 325130f4b05bSDavid Gibson } else if (strcmp(value, "disabled") == 0) { 325230f4b05bSDavid Gibson spapr->resize_hpt = SPAPR_RESIZE_HPT_DISABLED; 325330f4b05bSDavid Gibson } else if (strcmp(value, "enabled") == 0) { 325430f4b05bSDavid Gibson spapr->resize_hpt = SPAPR_RESIZE_HPT_ENABLED; 325530f4b05bSDavid Gibson } else if (strcmp(value, "required") == 0) { 325630f4b05bSDavid Gibson spapr->resize_hpt = 
SPAPR_RESIZE_HPT_REQUIRED; 325730f4b05bSDavid Gibson } else { 325830f4b05bSDavid Gibson error_setg(errp, "Bad value for \"resize-hpt\" property"); 325930f4b05bSDavid Gibson } 326030f4b05bSDavid Gibson } 326130f4b05bSDavid Gibson 3262fc8c745dSAlexey Kardashevskiy static bool spapr_get_vof(Object *obj, Error **errp) 3263fc8c745dSAlexey Kardashevskiy { 3264fc8c745dSAlexey Kardashevskiy SpaprMachineState *spapr = SPAPR_MACHINE(obj); 3265fc8c745dSAlexey Kardashevskiy 3266fc8c745dSAlexey Kardashevskiy return spapr->vof != NULL; 3267fc8c745dSAlexey Kardashevskiy } 3268fc8c745dSAlexey Kardashevskiy 3269fc8c745dSAlexey Kardashevskiy static void spapr_set_vof(Object *obj, bool value, Error **errp) 3270fc8c745dSAlexey Kardashevskiy { 3271fc8c745dSAlexey Kardashevskiy SpaprMachineState *spapr = SPAPR_MACHINE(obj); 3272fc8c745dSAlexey Kardashevskiy 3273fc8c745dSAlexey Kardashevskiy if (spapr->vof) { 3274fc8c745dSAlexey Kardashevskiy vof_cleanup(spapr->vof); 3275fc8c745dSAlexey Kardashevskiy g_free(spapr->vof); 3276fc8c745dSAlexey Kardashevskiy spapr->vof = NULL; 3277fc8c745dSAlexey Kardashevskiy } 3278fc8c745dSAlexey Kardashevskiy if (!value) { 3279fc8c745dSAlexey Kardashevskiy return; 3280fc8c745dSAlexey Kardashevskiy } 3281fc8c745dSAlexey Kardashevskiy spapr->vof = g_malloc0(sizeof(*spapr->vof)); 3282fc8c745dSAlexey Kardashevskiy } 3283fc8c745dSAlexey Kardashevskiy 32843ba3d0bcSCédric Le Goater static char *spapr_get_ic_mode(Object *obj, Error **errp) 32853ba3d0bcSCédric Le Goater { 3286ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(obj); 32873ba3d0bcSCédric Le Goater 32883ba3d0bcSCédric Le Goater if (spapr->irq == &spapr_irq_xics_legacy) { 32893ba3d0bcSCédric Le Goater return g_strdup("legacy"); 32903ba3d0bcSCédric Le Goater } else if (spapr->irq == &spapr_irq_xics) { 32913ba3d0bcSCédric Le Goater return g_strdup("xics"); 32923ba3d0bcSCédric Le Goater } else if (spapr->irq == &spapr_irq_xive) { 32933ba3d0bcSCédric Le Goater return g_strdup("xive"); 
329413db0cd9SCédric Le Goater } else if (spapr->irq == &spapr_irq_dual) { 329513db0cd9SCédric Le Goater return g_strdup("dual"); 32963ba3d0bcSCédric Le Goater } 32973ba3d0bcSCédric Le Goater g_assert_not_reached(); 32983ba3d0bcSCédric Le Goater } 32993ba3d0bcSCédric Le Goater 33003ba3d0bcSCédric Le Goater static void spapr_set_ic_mode(Object *obj, const char *value, Error **errp) 33013ba3d0bcSCédric Le Goater { 3302ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(obj); 33033ba3d0bcSCédric Le Goater 330421df5e4fSGreg Kurz if (SPAPR_MACHINE_GET_CLASS(spapr)->legacy_irq_allocation) { 330521df5e4fSGreg Kurz error_setg(errp, "This machine only uses the legacy XICS backend, don't pass ic-mode"); 330621df5e4fSGreg Kurz return; 330721df5e4fSGreg Kurz } 330821df5e4fSGreg Kurz 33093ba3d0bcSCédric Le Goater /* The legacy IRQ backend can not be set */ 33103ba3d0bcSCédric Le Goater if (strcmp(value, "xics") == 0) { 33113ba3d0bcSCédric Le Goater spapr->irq = &spapr_irq_xics; 33123ba3d0bcSCédric Le Goater } else if (strcmp(value, "xive") == 0) { 33133ba3d0bcSCédric Le Goater spapr->irq = &spapr_irq_xive; 331413db0cd9SCédric Le Goater } else if (strcmp(value, "dual") == 0) { 331513db0cd9SCédric Le Goater spapr->irq = &spapr_irq_dual; 33163ba3d0bcSCédric Le Goater } else { 33173ba3d0bcSCédric Le Goater error_setg(errp, "Bad value for \"ic-mode\" property"); 33183ba3d0bcSCédric Le Goater } 33193ba3d0bcSCédric Le Goater } 33203ba3d0bcSCédric Le Goater 332127461d69SPrasad J Pandit static char *spapr_get_host_model(Object *obj, Error **errp) 332227461d69SPrasad J Pandit { 3323ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(obj); 332427461d69SPrasad J Pandit 332527461d69SPrasad J Pandit return g_strdup(spapr->host_model); 332627461d69SPrasad J Pandit } 332727461d69SPrasad J Pandit 332827461d69SPrasad J Pandit static void spapr_set_host_model(Object *obj, const char *value, Error **errp) 332927461d69SPrasad J Pandit { 3330ce2918cbSDavid Gibson 
SpaprMachineState *spapr = SPAPR_MACHINE(obj);

    g_free(spapr->host_model);
    spapr->host_model = g_strdup(value);
}

/* QOM getter for the "host-serial" machine property (returns a copy). */
static char *spapr_get_host_serial(Object *obj, Error **errp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(obj);

    return g_strdup(spapr->host_serial);
}

/* QOM setter for the "host-serial" machine property. */
static void spapr_set_host_serial(Object *obj, const char *value, Error **errp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(obj);

    g_free(spapr->host_serial);
    spapr->host_serial = g_strdup(value);
}

/*
 * Instance-init for the sPAPR machine object: set pre-machine-init
 * defaults and register all user-visible machine properties.  Runs
 * before any command-line property is applied, so every value set
 * here can still be overridden by the user.
 */
static void spapr_instance_init(Object *obj)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(obj);
    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
    MachineState *ms = MACHINE(spapr);
    MachineClass *mc = MACHINE_GET_CLASS(ms);

    /*
     * NVDIMM support went live in 5.1 without considering that, in
     * other archs, the user needs to enable NVDIMM support with the
     * 'nvdimm' machine option and the default behavior is NVDIMM
     * support disabled. It is too late to roll back to the standard
     * behavior without breaking 5.1 guests.
     */
    if (mc->nvdimm_supported) {
        ms->nvdimms_state->is_enabled = true;
    }

    /* -1 marks "no HTAB fd open"; see uses of spapr->htab_fd elsewhere */
    spapr->htab_fd = -1;
    spapr->use_hotplug_event_source = true;
    spapr->kvm_type = g_strdup(DEFAULT_KVM_TYPE);
    object_property_add_str(obj, "kvm-type",
                            spapr_get_kvm_type, spapr_set_kvm_type);
    object_property_set_description(obj, "kvm-type",
                                    "Specifies the KVM virtualization mode (auto,"
                                    " hv, pr). Defaults to 'auto'. This mode will use"
                                    " any available KVM module loaded in the host,"
                                    " where kvm_hv takes precedence if both kvm_hv and"
                                    " kvm_pr are loaded.");
    object_property_add_bool(obj, "modern-hotplug-events",
                             spapr_get_modern_hotplug_events,
                             spapr_set_modern_hotplug_events);
    object_property_set_description(obj, "modern-hotplug-events",
                                    "Use dedicated hotplug event mechanism in"
                                    " place of standard EPOW events when possible"
                                    " (required for memory hot-unplug support)");
    ppc_compat_add_property(obj, "max-cpu-compat", &spapr->max_compat_pvr,
                            "Maximum permitted CPU compatibility mode");

    object_property_add_str(obj, "resize-hpt",
                            spapr_get_resize_hpt, spapr_set_resize_hpt);
    object_property_set_description(obj, "resize-hpt",
                                    "Resizing of the Hash Page Table (enabled, disabled, required)");
    object_property_add_uint32_ptr(obj, "vsmt",
                                   &spapr->vsmt, OBJ_PROP_FLAG_READWRITE);
    object_property_set_description(obj, "vsmt",
                                    "Virtual SMT: KVM behaves as if this were"
                                    " the host's SMT mode");

    object_property_add_bool(obj, "vfio-no-msix-emulation",
                             spapr_get_msix_emulation, NULL);

    object_property_add_uint64_ptr(obj, "kernel-addr",
                                   &spapr->kernel_addr, OBJ_PROP_FLAG_READWRITE);
    object_property_set_description(obj, "kernel-addr",
                                    stringify(KERNEL_LOAD_ADDR)
                                    " for -kernel is the default");
    spapr->kernel_addr = KERNEL_LOAD_ADDR;

    object_property_add_bool(obj, "x-vof", spapr_get_vof, spapr_set_vof);
    object_property_set_description(obj, "x-vof",
                                    "Enable Virtual Open Firmware (experimental)");

    /* The machine class defines the default interrupt controller mode */
    spapr->irq = smc->irq;
    object_property_add_str(obj, "ic-mode", spapr_get_ic_mode,
                            spapr_set_ic_mode);
    object_property_set_description(obj, "ic-mode",
                                    "Specifies the interrupt controller mode (xics, xive, dual)");

    object_property_add_str(obj, "host-model",
                            spapr_get_host_model, spapr_set_host_model);
    object_property_set_description(obj, "host-model",
                                    "Host model to advertise in guest device tree");
    object_property_add_str(obj, "host-serial",
                            spapr_get_host_serial, spapr_set_host_serial);
    object_property_set_description(obj, "host-serial",
                                    "Host serial number to advertise in guest device tree");
}

/* Instance finalizer: release only what instance_init allocated itself. */
static void spapr_machine_finalizefn(Object *obj)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(obj);

    g_free(spapr->kvm_type);
}

/*
 * Per-CPU system-reset delivery (run via async_run_on_cpu from
 * spapr_nmi).  With FWNMI active, PAPR requires the reset vector to
 * receive a pointer to a per-CPU error-log area in R3; the area is
 * carved out after the RTAS error log, indexed by cpu_index.
 */
void spapr_do_system_reset_on_cpu(CPUState *cs, run_on_cpu_data arg)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;

    cpu_synchronize_state(cs);
    /* If FWNMI is inactive, addr will be -1, which will deliver to 0x100 */
    if (spapr->fwnmi_system_reset_addr != -1) {
        uint64_t rtas_addr, addr;

        /* get rtas addr from fdt */
        rtas_addr = spapr_get_rtas_addr();
        if (!rtas_addr) {
            qemu_system_guest_panicked(NULL);
            return;
        }

        addr = rtas_addr + RTAS_ERROR_LOG_MAX +
cs->cpu_index * sizeof(uint64_t)*2;
        /* Save the original R3 and a zero flag word, then point R3 there */
        stq_be_phys(&address_space_memory, addr, env->gpr[3]);
        stq_be_phys(&address_space_memory, addr + sizeof(uint64_t), 0);
        env->gpr[3] = addr;
    }
    ppc_cpu_do_system_reset(cs);
    if (spapr->fwnmi_system_reset_addr != -1) {
        /* Redirect the reset to the firmware-registered FWNMI vector */
        env->nip = spapr->fwnmi_system_reset_addr;
    }
}

/*
 * NMI (monitor "nmi" command) handler: queue a system reset on every
 * vCPU; the actual delivery happens in spapr_do_system_reset_on_cpu.
 */
static void spapr_nmi(NMIState *n, int cpu_index, Error **errp)
{
    CPUState *cs;

    CPU_FOREACH(cs) {
        async_run_on_cpu(cs, spapr_do_system_reset_on_cpu, RUN_ON_CPU_NULL);
    }
}

/*
 * Build the device-tree memory node for one hotplugged LMB.  The LMB's
 * guest-physical address is derived from its DRC index; the NUMA node
 * comes from the owning DIMM's "node" property.  Always succeeds.
 */
int spapr_lmb_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr,
                          void *fdt, int *fdt_start_offset, Error **errp)
{
    uint64_t addr;
    uint32_t node;

    addr = spapr_drc_index(drc) * SPAPR_MEMORY_BLOCK_SIZE;
    node = object_property_get_uint(OBJECT(drc->dev), PC_DIMM_NODE_PROP,
                                    &error_abort);
    *fdt_start_offset = spapr_dt_memory_node(spapr, fdt, node, addr,
                                             SPAPR_MEMORY_BLOCK_SIZE);
    return 0;
}

/*
 * Attach the LMB DRCs covering [addr_start, addr_start + size) to @dev
 * and, for hotplug (as opposed to coldplug), notify the guest — either
 * with one indexed event on the first LMB's DRC or with a plain
 * count-based event, depending on @dedicated_hp_event_source.
 */
static void spapr_add_lmbs(DeviceState *dev, uint64_t addr_start, uint64_t size,
                           bool dedicated_hp_event_source)
{
    SpaprDrc *drc;
    uint32_t nr_lmbs = size/SPAPR_MEMORY_BLOCK_SIZE;
    int i;
    uint64_t addr = addr_start;
    bool hotplugged = spapr_drc_hotplugged(dev);

    for (i = 0; i < nr_lmbs; i++) {
        drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
                              addr / SPAPR_MEMORY_BLOCK_SIZE);
        g_assert(drc);

        /*
         * memory_device_get_free_addr() provided a range of free addresses
         * that doesn't overlap with any existing mapping at pre-plug. The
         * corresponding LMB DRCs are thus assumed to be all attachable.
         */
        spapr_drc_attach(drc, dev);
        if (!hotplugged) {
            spapr_drc_reset(drc);
        }
        addr += SPAPR_MEMORY_BLOCK_SIZE;
    }
    /* send hotplug notification to the
     * guest only in case of hotplugged memory
     */
    if (hotplugged) {
        if (dedicated_hp_event_source) {
            drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
                                  addr_start / SPAPR_MEMORY_BLOCK_SIZE);
            g_assert(drc);
            spapr_hotplug_req_add_by_count_indexed(SPAPR_DR_CONNECTOR_TYPE_LMB,
                                                   nr_lmbs,
                                                   spapr_drc_index(drc));
        } else {
            spapr_hotplug_req_add_by_count(SPAPR_DR_CONNECTOR_TYPE_LMB,
                                           nr_lmbs);
        }
    }
}
/*
 * Hotplug-handler plug callback for memory devices.  By the time this
 * runs, spapr_memory_pre_plug() has already validated the device, so
 * failures here would be programming errors.  Regular DIMMs are broken
 * up into LMBs; NVDIMMs are handed to the nvdimm code by slot number.
 */
static void spapr_memory_plug(HotplugHandler *hotplug_dev, DeviceState *dev)
{
    SpaprMachineState *ms = SPAPR_MACHINE(hotplug_dev);
    PCDIMMDevice *dimm = PC_DIMM(dev);
    uint64_t size, addr;
    int64_t slot;
    bool is_nvdimm = object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM);

    size = memory_device_get_region_size(MEMORY_DEVICE(dev), &error_abort);

    pc_dimm_plug(dimm, MACHINE(ms));

    if (!is_nvdimm) {
        addr = object_property_get_uint(OBJECT(dimm),
                                        PC_DIMM_ADDR_PROP, &error_abort);
        /* Use the dedicated hotplug event source iff the guest CAS'd it */
        spapr_add_lmbs(dev, addr, size,
                       spapr_ovec_test(ms->ov5_cas, OV5_HP_EVT));
    } else {
        slot = object_property_get_int(OBJECT(dimm),
                                       PC_DIMM_SLOT_PROP, &error_abort);
        /* We should have valid slot number at this point */
        g_assert(slot >= 0);
        spapr_add_nvdimm(dev, slot);
    }
}

/*
 * Hotplug-handler pre-plug callback for memory devices: reject the
 * device before any state changes if the machine lacks DR-LMB support,
 * the size is bad (non-LMB-multiple for DIMMs, nvdimm-specific checks
 * for NVDIMMs), or the backend page size is incompatible.
 */
static void spapr_memory_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
                                  Error **errp)
{
    const SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(hotplug_dev);
    SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_dev);
    bool is_nvdimm = object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM);
    PCDIMMDevice *dimm = PC_DIMM(dev);
    Error *local_err = NULL;
    uint64_t size;
    Object *memdev;
    hwaddr pagesize;

    if (!smc->dr_lmb_enabled) {
        error_setg(errp, "Memory hotplug not supported for this machine");
        return;
    }

    size = memory_device_get_region_size(MEMORY_DEVICE(dimm), &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    if (is_nvdimm) {
        if (!spapr_nvdimm_validate(hotplug_dev, NVDIMM(dev), size, errp)) {
            return;
        }
    } else if (size % SPAPR_MEMORY_BLOCK_SIZE) {
        error_setg(errp, "Hotplugged memory size must be a multiple of "
                   "%" PRIu64 " MB", SPAPR_MEMORY_BLOCK_SIZE / MiB);
        return;
    }

    memdev = object_property_get_link(OBJECT(dimm), PC_DIMM_MEMDEV_PROP,
                                      &error_abort);
    pagesize = host_memory_backend_pagesize(MEMORY_BACKEND(memdev));
    if (!spapr_check_pagesize(spapr, pagesize, errp)) {
        return;
    }

    pc_dimm_pre_plug(dimm, MACHINE(hotplug_dev), NULL, errp);
}

/* Tracks one DIMM whose unplug is in flight: counts LMBs left to release. */
struct SpaprDimmState {
    PCDIMMDevice *dimm;
    uint32_t nr_lmbs;
    QTAILQ_ENTRY(SpaprDimmState) next;
};

/* Return the pending-unplug record for @dimm, or NULL if none exists. */
static SpaprDimmState *spapr_pending_dimm_unplugs_find(SpaprMachineState *s,
                                                       PCDIMMDevice *dimm)
{
    SpaprDimmState *dimm_state = NULL;

    QTAILQ_FOREACH(dimm_state, &s->pending_dimm_unplugs, next) {
        if (dimm_state->dimm == dimm) {
            break;
        }
    }
    return dimm_state;
}

/*
 * Return the pending-unplug record for @dimm, creating one with
 * @nr_lmbs outstanding LMBs if it does not exist yet.
 */
static SpaprDimmState *spapr_pending_dimm_unplugs_add(SpaprMachineState *spapr,
                                                      uint32_t nr_lmbs,
                                                      PCDIMMDevice *dimm)
{
    SpaprDimmState *ds = NULL;

    /*
     * If this request is for a DIMM whose removal had failed earlier
     * (due to guest's refusal to remove the LMBs), we would have this
     * dimm already in the pending_dimm_unplugs list. In that
     * case don't add again.
     */
    ds = spapr_pending_dimm_unplugs_find(spapr, dimm);
    if (!ds) {
        ds = g_new0(SpaprDimmState, 1);
        ds->nr_lmbs = nr_lmbs;
        ds->dimm = dimm;
        QTAILQ_INSERT_HEAD(&spapr->pending_dimm_unplugs, ds, next);
    }
    return ds;
}

/* Drop and free a pending-unplug record (all its LMBs were released). */
static void spapr_pending_dimm_unplugs_remove(SpaprMachineState *spapr,
                                              SpaprDimmState *dimm_state)
{
    QTAILQ_REMOVE(&spapr->pending_dimm_unplugs, dimm_state, next);
    g_free(dimm_state);
}

/*
 * Rebuild the pending-unplug record for @dimm (e.g. after migration):
 * count how many of its LMB DRCs are still attached to a device and
 * register that count as the outstanding LMBs.
 */
static SpaprDimmState *spapr_recover_pending_dimm_state(SpaprMachineState *ms,
                                                        PCDIMMDevice *dimm)
{
    SpaprDrc *drc;
    uint64_t size = memory_device_get_region_size(MEMORY_DEVICE(dimm),
                                                  &error_abort);
    uint32_t nr_lmbs = size / SPAPR_MEMORY_BLOCK_SIZE;
    uint32_t avail_lmbs = 0;
    uint64_t addr_start, addr;
    int i;

    addr_start = object_property_get_uint(OBJECT(dimm), PC_DIMM_ADDR_PROP,
                                          &error_abort);

    addr = addr_start;
    for (i = 0; i < nr_lmbs; i++) {
        drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
                              addr /
SPAPR_MEMORY_BLOCK_SIZE); 366816ee9980SDaniel Henrique Barboza g_assert(drc); 3669454b580aSDavid Gibson if (drc->dev) { 367016ee9980SDaniel Henrique Barboza avail_lmbs++; 367116ee9980SDaniel Henrique Barboza } 367216ee9980SDaniel Henrique Barboza addr += SPAPR_MEMORY_BLOCK_SIZE; 367316ee9980SDaniel Henrique Barboza } 367416ee9980SDaniel Henrique Barboza 36758d5981c4SBharata B Rao return spapr_pending_dimm_unplugs_add(ms, avail_lmbs, dimm); 367616ee9980SDaniel Henrique Barboza } 367716ee9980SDaniel Henrique Barboza 3678eb7f80fdSDaniel Henrique Barboza void spapr_memory_unplug_rollback(SpaprMachineState *spapr, DeviceState *dev) 3679fe1831efSDaniel Henrique Barboza { 3680fe1831efSDaniel Henrique Barboza SpaprDimmState *ds; 3681fe1831efSDaniel Henrique Barboza PCDIMMDevice *dimm; 3682fe1831efSDaniel Henrique Barboza SpaprDrc *drc; 3683fe1831efSDaniel Henrique Barboza uint32_t nr_lmbs; 3684fe1831efSDaniel Henrique Barboza uint64_t size, addr_start, addr; 3685eb7f80fdSDaniel Henrique Barboza g_autofree char *qapi_error = NULL; 3686fe1831efSDaniel Henrique Barboza int i; 3687fe1831efSDaniel Henrique Barboza 3688fe1831efSDaniel Henrique Barboza if (!dev) { 3689fe1831efSDaniel Henrique Barboza return; 3690fe1831efSDaniel Henrique Barboza } 3691fe1831efSDaniel Henrique Barboza 3692fe1831efSDaniel Henrique Barboza dimm = PC_DIMM(dev); 3693fe1831efSDaniel Henrique Barboza ds = spapr_pending_dimm_unplugs_find(spapr, dimm); 3694fe1831efSDaniel Henrique Barboza 3695fe1831efSDaniel Henrique Barboza /* 3696fe1831efSDaniel Henrique Barboza * 'ds == NULL' would mean that the DIMM doesn't have a pending 3697fe1831efSDaniel Henrique Barboza * unplug state, but one of its DRC is marked as unplug_requested. 3698fe1831efSDaniel Henrique Barboza * This is bad and weird enough to g_assert() out. 
3699fe1831efSDaniel Henrique Barboza */ 3700fe1831efSDaniel Henrique Barboza g_assert(ds); 3701fe1831efSDaniel Henrique Barboza 3702fe1831efSDaniel Henrique Barboza spapr_pending_dimm_unplugs_remove(spapr, ds); 3703fe1831efSDaniel Henrique Barboza 3704fe1831efSDaniel Henrique Barboza size = memory_device_get_region_size(MEMORY_DEVICE(dimm), &error_abort); 3705fe1831efSDaniel Henrique Barboza nr_lmbs = size / SPAPR_MEMORY_BLOCK_SIZE; 3706fe1831efSDaniel Henrique Barboza 3707fe1831efSDaniel Henrique Barboza addr_start = object_property_get_uint(OBJECT(dimm), PC_DIMM_ADDR_PROP, 3708fe1831efSDaniel Henrique Barboza &error_abort); 3709fe1831efSDaniel Henrique Barboza 3710fe1831efSDaniel Henrique Barboza addr = addr_start; 3711fe1831efSDaniel Henrique Barboza for (i = 0; i < nr_lmbs; i++) { 3712fe1831efSDaniel Henrique Barboza drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, 3713fe1831efSDaniel Henrique Barboza addr / SPAPR_MEMORY_BLOCK_SIZE); 3714fe1831efSDaniel Henrique Barboza g_assert(drc); 3715fe1831efSDaniel Henrique Barboza 3716fe1831efSDaniel Henrique Barboza drc->unplug_requested = false; 3717fe1831efSDaniel Henrique Barboza addr += SPAPR_MEMORY_BLOCK_SIZE; 3718fe1831efSDaniel Henrique Barboza } 3719eb7f80fdSDaniel Henrique Barboza 3720eb7f80fdSDaniel Henrique Barboza /* 3721eb7f80fdSDaniel Henrique Barboza * Tell QAPI that something happened and the memory 37224b08cd56SDaniel Henrique Barboza * hotunplug wasn't successful. Keep sending 37234b08cd56SDaniel Henrique Barboza * MEM_UNPLUG_ERROR even while sending 37244b08cd56SDaniel Henrique Barboza * DEVICE_UNPLUG_GUEST_ERROR until the deprecation of 37254b08cd56SDaniel Henrique Barboza * MEM_UNPLUG_ERROR is due. 
3726eb7f80fdSDaniel Henrique Barboza */ 3727eb7f80fdSDaniel Henrique Barboza qapi_error = g_strdup_printf("Memory hotunplug rejected by the guest " 3728eb7f80fdSDaniel Henrique Barboza "for device %s", dev->id); 37294b08cd56SDaniel Henrique Barboza 373044d886abSDaniel Henrique Barboza qapi_event_send_mem_unplug_error(dev->id ? : "", qapi_error); 37314b08cd56SDaniel Henrique Barboza 3732047f2ca1SMarkus Armbruster qapi_event_send_device_unplug_guest_error(dev->id, 37334b08cd56SDaniel Henrique Barboza dev->canonical_path); 3734fe1831efSDaniel Henrique Barboza } 3735fe1831efSDaniel Henrique Barboza 373631834723SDaniel Henrique Barboza /* Callback to be called during DRC release. */ 373731834723SDaniel Henrique Barboza void spapr_lmb_release(DeviceState *dev) 3738cf632463SBharata B Rao { 37393ec71474SDavid Hildenbrand HotplugHandler *hotplug_ctrl = qdev_get_hotplug_handler(dev); 3740ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_ctrl); 3741ce2918cbSDavid Gibson SpaprDimmState *ds = spapr_pending_dimm_unplugs_find(spapr, PC_DIMM(dev)); 3742cf632463SBharata B Rao 374316ee9980SDaniel Henrique Barboza /* This information will get lost if a migration occurs 374416ee9980SDaniel Henrique Barboza * during the unplug process. In this case recover it. */ 374516ee9980SDaniel Henrique Barboza if (ds == NULL) { 374616ee9980SDaniel Henrique Barboza ds = spapr_recover_pending_dimm_state(spapr, PC_DIMM(dev)); 37478d5981c4SBharata B Rao g_assert(ds); 3748454b580aSDavid Gibson /* The DRC being examined by the caller at least must be counted */ 3749454b580aSDavid Gibson g_assert(ds->nr_lmbs); 375016ee9980SDaniel Henrique Barboza } 3751454b580aSDavid Gibson 3752454b580aSDavid Gibson if (--ds->nr_lmbs) { 3753cf632463SBharata B Rao return; 3754cf632463SBharata B Rao } 3755cf632463SBharata B Rao 3756cf632463SBharata B Rao /* 3757cf632463SBharata B Rao * Now that all the LMBs have been removed by the guest, call the 37583ec71474SDavid Hildenbrand * unplug handler chain. 
This can never fail. 3759cf632463SBharata B Rao */ 37603ec71474SDavid Hildenbrand hotplug_handler_unplug(hotplug_ctrl, dev, &error_abort); 376107578b0aSDavid Hildenbrand object_unparent(OBJECT(dev)); 37623ec71474SDavid Hildenbrand } 37633ec71474SDavid Hildenbrand 37643ec71474SDavid Hildenbrand static void spapr_memory_unplug(HotplugHandler *hotplug_dev, DeviceState *dev) 37653ec71474SDavid Hildenbrand { 3766ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_dev); 3767ce2918cbSDavid Gibson SpaprDimmState *ds = spapr_pending_dimm_unplugs_find(spapr, PC_DIMM(dev)); 37683ec71474SDavid Hildenbrand 3769df2d7ca7SGreg Kurz /* We really shouldn't get this far without anything to unplug */ 3770df2d7ca7SGreg Kurz g_assert(ds); 3771df2d7ca7SGreg Kurz 3772fd3416f5SDavid Hildenbrand pc_dimm_unplug(PC_DIMM(dev), MACHINE(hotplug_dev)); 3773981c3dcdSMarkus Armbruster qdev_unrealize(dev); 37742a129767SDaniel Henrique Barboza spapr_pending_dimm_unplugs_remove(spapr, ds); 3775cf632463SBharata B Rao } 3776cf632463SBharata B Rao 3777cf632463SBharata B Rao static void spapr_memory_unplug_request(HotplugHandler *hotplug_dev, 3778cf632463SBharata B Rao DeviceState *dev, Error **errp) 3779cf632463SBharata B Rao { 3780ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_dev); 3781cf632463SBharata B Rao PCDIMMDevice *dimm = PC_DIMM(dev); 378204790978SThomas Huth uint32_t nr_lmbs; 378304790978SThomas Huth uint64_t size, addr_start, addr; 37840cffce56SDavid Gibson int i; 3785ce2918cbSDavid Gibson SpaprDrc *drc; 378604790978SThomas Huth 3787ee3a71e3SShivaprasad G Bhat if (object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM)) { 3788dcfe4805SMarkus Armbruster error_setg(errp, "nvdimm device hot unplug is not supported yet."); 3789dcfe4805SMarkus Armbruster return; 3790ee3a71e3SShivaprasad G Bhat } 3791ee3a71e3SShivaprasad G Bhat 3792946d6154SDavid Hildenbrand size = memory_device_get_region_size(MEMORY_DEVICE(dimm), &error_abort); 379304790978SThomas Huth nr_lmbs = 
size / SPAPR_MEMORY_BLOCK_SIZE; 379404790978SThomas Huth 37959ed442b8SMarc-André Lureau addr_start = object_property_get_uint(OBJECT(dimm), PC_DIMM_ADDR_PROP, 3796271ced1dSGreg Kurz &error_abort); 3797cf632463SBharata B Rao 37982a129767SDaniel Henrique Barboza /* 37992a129767SDaniel Henrique Barboza * An existing pending dimm state for this DIMM means that there is an 38002a129767SDaniel Henrique Barboza * unplug operation in progress, waiting for the spapr_lmb_release 38012a129767SDaniel Henrique Barboza * callback to complete the job (BQL can't cover that far). In this case, 38022a129767SDaniel Henrique Barboza * bail out to avoid detaching DRCs that were already released. 38032a129767SDaniel Henrique Barboza */ 38042a129767SDaniel Henrique Barboza if (spapr_pending_dimm_unplugs_find(spapr, dimm)) { 3805dcfe4805SMarkus Armbruster error_setg(errp, "Memory unplug already in progress for device %s", 38062a129767SDaniel Henrique Barboza dev->id); 3807dcfe4805SMarkus Armbruster return; 38082a129767SDaniel Henrique Barboza } 38092a129767SDaniel Henrique Barboza 38108d5981c4SBharata B Rao spapr_pending_dimm_unplugs_add(spapr, nr_lmbs, dimm); 38110cffce56SDavid Gibson 38120cffce56SDavid Gibson addr = addr_start; 38130cffce56SDavid Gibson for (i = 0; i < nr_lmbs; i++) { 3814fbf55397SDavid Gibson drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, 38150cffce56SDavid Gibson addr / SPAPR_MEMORY_BLOCK_SIZE); 38160cffce56SDavid Gibson g_assert(drc); 38170cffce56SDavid Gibson 3818a03509cdSDaniel Henrique Barboza spapr_drc_unplug_request(drc); 38190cffce56SDavid Gibson addr += SPAPR_MEMORY_BLOCK_SIZE; 38200cffce56SDavid Gibson } 38210cffce56SDavid Gibson 3822fbf55397SDavid Gibson drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, 38230cffce56SDavid Gibson addr_start / SPAPR_MEMORY_BLOCK_SIZE); 38240cffce56SDavid Gibson spapr_hotplug_req_remove_by_count_indexed(SPAPR_DR_CONNECTOR_TYPE_LMB, 38250b55aa91SDavid Gibson nr_lmbs, spapr_drc_index(drc)); 3826cf632463SBharata B Rao } 3827cf632463SBharata B 
Rao 3828765d1bddSDavid Gibson /* Callback to be called during DRC release. */ 3829765d1bddSDavid Gibson void spapr_core_release(DeviceState *dev) 3830ff9006ddSIgor Mammedov { 3831a4261be1SDavid Hildenbrand HotplugHandler *hotplug_ctrl = qdev_get_hotplug_handler(dev); 3832a4261be1SDavid Hildenbrand 3833a4261be1SDavid Hildenbrand /* Call the unplug handler chain. This can never fail. */ 3834a4261be1SDavid Hildenbrand hotplug_handler_unplug(hotplug_ctrl, dev, &error_abort); 383507578b0aSDavid Hildenbrand object_unparent(OBJECT(dev)); 3836a4261be1SDavid Hildenbrand } 3837a4261be1SDavid Hildenbrand 3838a4261be1SDavid Hildenbrand static void spapr_core_unplug(HotplugHandler *hotplug_dev, DeviceState *dev) 3839a4261be1SDavid Hildenbrand { 3840a4261be1SDavid Hildenbrand MachineState *ms = MACHINE(hotplug_dev); 3841ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(ms); 3842ff9006ddSIgor Mammedov CPUCore *cc = CPU_CORE(dev); 3843535455fdSIgor Mammedov CPUArchId *core_slot = spapr_find_cpu_slot(ms, cc->core_id, NULL); 3844ff9006ddSIgor Mammedov 384546f7afa3SGreg Kurz if (smc->pre_2_10_has_unused_icps) { 3846ce2918cbSDavid Gibson SpaprCpuCore *sc = SPAPR_CPU_CORE(OBJECT(dev)); 384746f7afa3SGreg Kurz int i; 384846f7afa3SGreg Kurz 384946f7afa3SGreg Kurz for (i = 0; i < cc->nr_threads; i++) { 385094ad93bdSGreg Kurz CPUState *cs = CPU(sc->threads[i]); 385146f7afa3SGreg Kurz 385246f7afa3SGreg Kurz pre_2_10_vmstate_register_dummy_icp(cs->cpu_index); 385346f7afa3SGreg Kurz } 385446f7afa3SGreg Kurz } 385546f7afa3SGreg Kurz 385607572c06SGreg Kurz assert(core_slot); 3857535455fdSIgor Mammedov core_slot->cpu = NULL; 3858981c3dcdSMarkus Armbruster qdev_unrealize(dev); 3859ff9006ddSIgor Mammedov } 3860ff9006ddSIgor Mammedov 3861115debf2SIgor Mammedov static 3862115debf2SIgor Mammedov void spapr_core_unplug_request(HotplugHandler *hotplug_dev, DeviceState *dev, 3863ff9006ddSIgor Mammedov Error **errp) 3864ff9006ddSIgor Mammedov { 3865ce2918cbSDavid Gibson 
SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev)); 3866535455fdSIgor Mammedov int index; 3867ce2918cbSDavid Gibson SpaprDrc *drc; 3868535455fdSIgor Mammedov CPUCore *cc = CPU_CORE(dev); 3869ff9006ddSIgor Mammedov 3870535455fdSIgor Mammedov if (!spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index)) { 3871535455fdSIgor Mammedov error_setg(errp, "Unable to find CPU core with core-id: %d", 3872535455fdSIgor Mammedov cc->core_id); 3873535455fdSIgor Mammedov return; 3874535455fdSIgor Mammedov } 3875ff9006ddSIgor Mammedov if (index == 0) { 3876ff9006ddSIgor Mammedov error_setg(errp, "Boot CPU core may not be unplugged"); 3877ff9006ddSIgor Mammedov return; 3878ff9006ddSIgor Mammedov } 3879ff9006ddSIgor Mammedov 38805d0fb150SGreg Kurz drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU, 38815d0fb150SGreg Kurz spapr_vcpu_id(spapr, cc->core_id)); 3882ff9006ddSIgor Mammedov g_assert(drc); 3883ff9006ddSIgor Mammedov 388447c8c915SGreg Kurz if (!spapr_drc_unplug_requested(drc)) { 3885a03509cdSDaniel Henrique Barboza spapr_drc_unplug_request(drc); 3886ff9006ddSIgor Mammedov } 38872b18fc79SDaniel Henrique Barboza 38882b18fc79SDaniel Henrique Barboza /* 38892b18fc79SDaniel Henrique Barboza * spapr_hotplug_req_remove_by_index is left unguarded, out of the 38902b18fc79SDaniel Henrique Barboza * "!spapr_drc_unplug_requested" check, to allow for multiple IRQ 38912b18fc79SDaniel Henrique Barboza * pulses removing the same CPU. Otherwise, in an failed hotunplug 38922b18fc79SDaniel Henrique Barboza * attempt (e.g. the kernel will refuse to remove the last online 38932b18fc79SDaniel Henrique Barboza * CPU), we will never attempt it again because unplug_requested 38942b18fc79SDaniel Henrique Barboza * will still be 'true' in that case. 
38952b18fc79SDaniel Henrique Barboza */ 38962b18fc79SDaniel Henrique Barboza spapr_hotplug_req_remove_by_index(drc); 389747c8c915SGreg Kurz } 3898ff9006ddSIgor Mammedov 3899ce2918cbSDavid Gibson int spapr_core_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr, 3900345b12b9SGreg Kurz void *fdt, int *fdt_start_offset, Error **errp) 3901345b12b9SGreg Kurz { 3902ce2918cbSDavid Gibson SpaprCpuCore *core = SPAPR_CPU_CORE(drc->dev); 3903345b12b9SGreg Kurz CPUState *cs = CPU(core->threads[0]); 3904345b12b9SGreg Kurz PowerPCCPU *cpu = POWERPC_CPU(cs); 3905345b12b9SGreg Kurz DeviceClass *dc = DEVICE_GET_CLASS(cs); 3906345b12b9SGreg Kurz int id = spapr_get_vcpu_id(cpu); 39077265bc3eSDaniel Henrique Barboza g_autofree char *nodename = NULL; 3908345b12b9SGreg Kurz int offset; 3909345b12b9SGreg Kurz 3910345b12b9SGreg Kurz nodename = g_strdup_printf("%s@%x", dc->fw_name, id); 3911345b12b9SGreg Kurz offset = fdt_add_subnode(fdt, 0, nodename); 3912345b12b9SGreg Kurz 391391335a5eSDavid Gibson spapr_dt_cpu(cs, fdt, offset, spapr); 3914345b12b9SGreg Kurz 3915a85bb34eSDaniel Henrique Barboza /* 3916a85bb34eSDaniel Henrique Barboza * spapr_dt_cpu() does not fill the 'name' property in the 3917a85bb34eSDaniel Henrique Barboza * CPU node. The function is called during boot process, before 3918a85bb34eSDaniel Henrique Barboza * and after CAS, and overwriting the 'name' property written 3919a85bb34eSDaniel Henrique Barboza * by SLOF is not allowed. 3920a85bb34eSDaniel Henrique Barboza * 3921a85bb34eSDaniel Henrique Barboza * Write it manually after spapr_dt_cpu(). This makes the hotplug 3922a85bb34eSDaniel Henrique Barboza * CPUs more compatible with the coldplugged ones, which have 3923a85bb34eSDaniel Henrique Barboza * the 'name' property. Linux Kernel also relies on this 3924a85bb34eSDaniel Henrique Barboza * property to identify CPU nodes. 
3925a85bb34eSDaniel Henrique Barboza */ 3926a85bb34eSDaniel Henrique Barboza _FDT((fdt_setprop_string(fdt, offset, "name", nodename))); 3927a85bb34eSDaniel Henrique Barboza 3928345b12b9SGreg Kurz *fdt_start_offset = offset; 3929345b12b9SGreg Kurz return 0; 3930345b12b9SGreg Kurz } 3931345b12b9SGreg Kurz 3932f9b43958SGreg Kurz static void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev) 3933ff9006ddSIgor Mammedov { 3934ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev)); 3935ff9006ddSIgor Mammedov MachineClass *mc = MACHINE_GET_CLASS(spapr); 3936ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 3937ce2918cbSDavid Gibson SpaprCpuCore *core = SPAPR_CPU_CORE(OBJECT(dev)); 3938ff9006ddSIgor Mammedov CPUCore *cc = CPU_CORE(dev); 3939345b12b9SGreg Kurz CPUState *cs; 3940ce2918cbSDavid Gibson SpaprDrc *drc; 3941535455fdSIgor Mammedov CPUArchId *core_slot; 3942535455fdSIgor Mammedov int index; 394394fd9cbaSLaurent Vivier bool hotplugged = spapr_drc_hotplugged(dev); 3944b1e81567SGreg Kurz int i; 3945ff9006ddSIgor Mammedov 3946535455fdSIgor Mammedov core_slot = spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index); 3947f9b43958SGreg Kurz g_assert(core_slot); /* Already checked in spapr_core_pre_plug() */ 3948f9b43958SGreg Kurz 39495d0fb150SGreg Kurz drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU, 39505d0fb150SGreg Kurz spapr_vcpu_id(spapr, cc->core_id)); 3951ff9006ddSIgor Mammedov 3952c5514d0eSIgor Mammedov g_assert(drc || !mc->has_hotpluggable_cpus); 3953ff9006ddSIgor Mammedov 3954e49c63d5SGreg Kurz if (drc) { 3955f9b43958SGreg Kurz /* 3956f9b43958SGreg Kurz * spapr_core_pre_plug() already buys us this is a brand new 3957f9b43958SGreg Kurz * core being plugged into a free slot. Nothing should already 3958f9b43958SGreg Kurz * be attached to the corresponding DRC. 
3959f9b43958SGreg Kurz */ 3960bc370a65SGreg Kurz spapr_drc_attach(drc, dev); 3961ff9006ddSIgor Mammedov 396294fd9cbaSLaurent Vivier if (hotplugged) { 3963ff9006ddSIgor Mammedov /* 396494fd9cbaSLaurent Vivier * Send hotplug notification interrupt to the guest only 396594fd9cbaSLaurent Vivier * in case of hotplugged CPUs. 3966ff9006ddSIgor Mammedov */ 3967ff9006ddSIgor Mammedov spapr_hotplug_req_add_by_index(drc); 396894fd9cbaSLaurent Vivier } else { 396994fd9cbaSLaurent Vivier spapr_drc_reset(drc); 3970ff9006ddSIgor Mammedov } 397194fd9cbaSLaurent Vivier } 397294fd9cbaSLaurent Vivier 3973535455fdSIgor Mammedov core_slot->cpu = OBJECT(dev); 397446f7afa3SGreg Kurz 3975b1e81567SGreg Kurz /* 3976b1e81567SGreg Kurz * Set compatibility mode to match the boot CPU, which was either set 397737641213SGreg Kurz * by the machine reset code or by CAS. This really shouldn't fail at 397837641213SGreg Kurz * this point. 3979b1e81567SGreg Kurz */ 3980b1e81567SGreg Kurz if (hotplugged) { 3981b1e81567SGreg Kurz for (i = 0; i < cc->nr_threads; i++) { 398237641213SGreg Kurz ppc_set_compat(core->threads[i], POWERPC_CPU(first_cpu)->compat_pvr, 398337641213SGreg Kurz &error_abort); 3984b1e81567SGreg Kurz } 3985b1e81567SGreg Kurz } 39861b4ab514SGreg Kurz 39871b4ab514SGreg Kurz if (smc->pre_2_10_has_unused_icps) { 39881b4ab514SGreg Kurz for (i = 0; i < cc->nr_threads; i++) { 39891b4ab514SGreg Kurz cs = CPU(core->threads[i]); 39901b4ab514SGreg Kurz pre_2_10_vmstate_unregister_dummy_icp(cs->cpu_index); 39911b4ab514SGreg Kurz } 39921b4ab514SGreg Kurz } 3993ff9006ddSIgor Mammedov } 3994ff9006ddSIgor Mammedov 3995ff9006ddSIgor Mammedov static void spapr_core_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev, 3996ff9006ddSIgor Mammedov Error **errp) 3997ff9006ddSIgor Mammedov { 3998ff9006ddSIgor Mammedov MachineState *machine = MACHINE(OBJECT(hotplug_dev)); 3999ff9006ddSIgor Mammedov MachineClass *mc = MACHINE_GET_CLASS(hotplug_dev); 4000ff9006ddSIgor Mammedov CPUCore *cc = CPU_CORE(dev); 
40012e9c10ebSIgor Mammedov const char *base_core_type = spapr_get_cpu_core_type(machine->cpu_type); 4002ff9006ddSIgor Mammedov const char *type = object_get_typename(OBJECT(dev)); 4003535455fdSIgor Mammedov CPUArchId *core_slot; 4004535455fdSIgor Mammedov int index; 4005fe6b6346SLike Xu unsigned int smp_threads = machine->smp.threads; 4006ff9006ddSIgor Mammedov 4007c5514d0eSIgor Mammedov if (dev->hotplugged && !mc->has_hotpluggable_cpus) { 4008dcfe4805SMarkus Armbruster error_setg(errp, "CPU hotplug not supported for this machine"); 4009dcfe4805SMarkus Armbruster return; 4010ff9006ddSIgor Mammedov } 4011ff9006ddSIgor Mammedov 4012ff9006ddSIgor Mammedov if (strcmp(base_core_type, type)) { 4013dcfe4805SMarkus Armbruster error_setg(errp, "CPU core type should be %s", base_core_type); 4014dcfe4805SMarkus Armbruster return; 4015ff9006ddSIgor Mammedov } 4016ff9006ddSIgor Mammedov 4017ff9006ddSIgor Mammedov if (cc->core_id % smp_threads) { 4018dcfe4805SMarkus Armbruster error_setg(errp, "invalid core id %d", cc->core_id); 4019dcfe4805SMarkus Armbruster return; 4020ff9006ddSIgor Mammedov } 4021ff9006ddSIgor Mammedov 4022459264efSDavid Gibson /* 4023459264efSDavid Gibson * In general we should have homogeneous threads-per-core, but old 4024459264efSDavid Gibson * (pre hotplug support) machine types allow the last core to have 4025459264efSDavid Gibson * reduced threads as a compatibility hack for when we allowed 4026459264efSDavid Gibson * total vcpus not a multiple of threads-per-core. 
4027459264efSDavid Gibson */ 4028459264efSDavid Gibson if (mc->has_hotpluggable_cpus && (cc->nr_threads != smp_threads)) { 4029dcfe4805SMarkus Armbruster error_setg(errp, "invalid nr-threads %d, must be %d", cc->nr_threads, 4030dcfe4805SMarkus Armbruster smp_threads); 4031dcfe4805SMarkus Armbruster return; 40328149e299SDavid Gibson } 40338149e299SDavid Gibson 4034535455fdSIgor Mammedov core_slot = spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index); 4035535455fdSIgor Mammedov if (!core_slot) { 4036dcfe4805SMarkus Armbruster error_setg(errp, "core id %d out of range", cc->core_id); 4037dcfe4805SMarkus Armbruster return; 4038ff9006ddSIgor Mammedov } 4039ff9006ddSIgor Mammedov 4040535455fdSIgor Mammedov if (core_slot->cpu) { 4041dcfe4805SMarkus Armbruster error_setg(errp, "core %d already populated", cc->core_id); 4042dcfe4805SMarkus Armbruster return; 4043ff9006ddSIgor Mammedov } 4044ff9006ddSIgor Mammedov 4045dcfe4805SMarkus Armbruster numa_cpu_pre_plug(core_slot, dev, errp); 4046ff9006ddSIgor Mammedov } 4047ff9006ddSIgor Mammedov 4048ce2918cbSDavid Gibson int spapr_phb_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr, 4049bb2bdd81SGreg Kurz void *fdt, int *fdt_start_offset, Error **errp) 4050bb2bdd81SGreg Kurz { 4051ce2918cbSDavid Gibson SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(drc->dev); 4052bb2bdd81SGreg Kurz int intc_phandle; 4053bb2bdd81SGreg Kurz 4054bb2bdd81SGreg Kurz intc_phandle = spapr_irq_get_phandle(spapr, spapr->fdt_blob, errp); 4055bb2bdd81SGreg Kurz if (intc_phandle <= 0) { 4056bb2bdd81SGreg Kurz return -1; 4057bb2bdd81SGreg Kurz } 4058bb2bdd81SGreg Kurz 40598cbe71ecSDavid Gibson if (spapr_dt_phb(spapr, sphb, intc_phandle, fdt, fdt_start_offset)) { 4060bb2bdd81SGreg Kurz error_setg(errp, "unable to create FDT node for PHB %d", sphb->index); 4061bb2bdd81SGreg Kurz return -1; 4062bb2bdd81SGreg Kurz } 4063bb2bdd81SGreg Kurz 4064bb2bdd81SGreg Kurz /* generally SLOF creates these, for hotplug it's up to QEMU */ 4065bb2bdd81SGreg Kurz 
_FDT(fdt_setprop_string(fdt, *fdt_start_offset, "name", "pci")); 4066bb2bdd81SGreg Kurz 4067bb2bdd81SGreg Kurz return 0; 4068bb2bdd81SGreg Kurz } 4069bb2bdd81SGreg Kurz 4070f5598c92SGreg Kurz static bool spapr_phb_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev, 4071bb2bdd81SGreg Kurz Error **errp) 4072bb2bdd81SGreg Kurz { 4073ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev)); 4074ce2918cbSDavid Gibson SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(dev); 4075ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); 4076bb2bdd81SGreg Kurz const unsigned windows_supported = spapr_phb_windows_supported(sphb); 40779a070699SGreg Kurz SpaprDrc *drc; 4078bb2bdd81SGreg Kurz 4079bb2bdd81SGreg Kurz if (dev->hotplugged && !smc->dr_phb_enabled) { 4080bb2bdd81SGreg Kurz error_setg(errp, "PHB hotplug not supported for this machine"); 4081f5598c92SGreg Kurz return false; 4082bb2bdd81SGreg Kurz } 4083bb2bdd81SGreg Kurz 4084bb2bdd81SGreg Kurz if (sphb->index == (uint32_t)-1) { 4085bb2bdd81SGreg Kurz error_setg(errp, "\"index\" for PAPR PHB is mandatory"); 4086f5598c92SGreg Kurz return false; 4087bb2bdd81SGreg Kurz } 4088bb2bdd81SGreg Kurz 40899a070699SGreg Kurz drc = spapr_drc_by_id(TYPE_SPAPR_DRC_PHB, sphb->index); 40909a070699SGreg Kurz if (drc && drc->dev) { 40919a070699SGreg Kurz error_setg(errp, "PHB %d already attached", sphb->index); 40929a070699SGreg Kurz return false; 40939a070699SGreg Kurz } 40949a070699SGreg Kurz 4095bb2bdd81SGreg Kurz /* 4096bb2bdd81SGreg Kurz * This will check that sphb->index doesn't exceed the maximum number of 4097bb2bdd81SGreg Kurz * PHBs for the current machine type. 
4098bb2bdd81SGreg Kurz */ 4099f5598c92SGreg Kurz return 4100bb2bdd81SGreg Kurz smc->phb_placement(spapr, sphb->index, 4101bb2bdd81SGreg Kurz &sphb->buid, &sphb->io_win_addr, 4102bb2bdd81SGreg Kurz &sphb->mem_win_addr, &sphb->mem64_win_addr, 4103ec132efaSAlexey Kardashevskiy windows_supported, sphb->dma_liobn, 4104ec132efaSAlexey Kardashevskiy &sphb->nv2_gpa_win_addr, &sphb->nv2_atsd_win_addr, 4105ec132efaSAlexey Kardashevskiy errp); 4106bb2bdd81SGreg Kurz } 4107bb2bdd81SGreg Kurz 41089a070699SGreg Kurz static void spapr_phb_plug(HotplugHandler *hotplug_dev, DeviceState *dev) 4109bb2bdd81SGreg Kurz { 4110ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev)); 4111ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); 4112ce2918cbSDavid Gibson SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(dev); 4113ce2918cbSDavid Gibson SpaprDrc *drc; 4114bb2bdd81SGreg Kurz bool hotplugged = spapr_drc_hotplugged(dev); 4115bb2bdd81SGreg Kurz 4116bb2bdd81SGreg Kurz if (!smc->dr_phb_enabled) { 4117bb2bdd81SGreg Kurz return; 4118bb2bdd81SGreg Kurz } 4119bb2bdd81SGreg Kurz 4120bb2bdd81SGreg Kurz drc = spapr_drc_by_id(TYPE_SPAPR_DRC_PHB, sphb->index); 4121bb2bdd81SGreg Kurz /* hotplug hooks should check it's enabled before getting this far */ 4122bb2bdd81SGreg Kurz assert(drc); 4123bb2bdd81SGreg Kurz 41249a070699SGreg Kurz /* spapr_phb_pre_plug() already checked the DRC is attachable */ 4125bc370a65SGreg Kurz spapr_drc_attach(drc, dev); 4126bb2bdd81SGreg Kurz 4127bb2bdd81SGreg Kurz if (hotplugged) { 4128bb2bdd81SGreg Kurz spapr_hotplug_req_add_by_index(drc); 4129bb2bdd81SGreg Kurz } else { 4130bb2bdd81SGreg Kurz spapr_drc_reset(drc); 4131bb2bdd81SGreg Kurz } 4132bb2bdd81SGreg Kurz } 4133bb2bdd81SGreg Kurz 4134bb2bdd81SGreg Kurz void spapr_phb_release(DeviceState *dev) 4135bb2bdd81SGreg Kurz { 4136bb2bdd81SGreg Kurz HotplugHandler *hotplug_ctrl = qdev_get_hotplug_handler(dev); 4137bb2bdd81SGreg Kurz 4138bb2bdd81SGreg Kurz 
hotplug_handler_unplug(hotplug_ctrl, dev, &error_abort); 413907578b0aSDavid Hildenbrand object_unparent(OBJECT(dev)); 4140bb2bdd81SGreg Kurz } 4141bb2bdd81SGreg Kurz 4142bb2bdd81SGreg Kurz static void spapr_phb_unplug(HotplugHandler *hotplug_dev, DeviceState *dev) 4143bb2bdd81SGreg Kurz { 4144981c3dcdSMarkus Armbruster qdev_unrealize(dev); 4145bb2bdd81SGreg Kurz } 4146bb2bdd81SGreg Kurz 4147bb2bdd81SGreg Kurz static void spapr_phb_unplug_request(HotplugHandler *hotplug_dev, 4148bb2bdd81SGreg Kurz DeviceState *dev, Error **errp) 4149bb2bdd81SGreg Kurz { 4150ce2918cbSDavid Gibson SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(dev); 4151ce2918cbSDavid Gibson SpaprDrc *drc; 4152bb2bdd81SGreg Kurz 4153bb2bdd81SGreg Kurz drc = spapr_drc_by_id(TYPE_SPAPR_DRC_PHB, sphb->index); 4154bb2bdd81SGreg Kurz assert(drc); 4155bb2bdd81SGreg Kurz 4156bb2bdd81SGreg Kurz if (!spapr_drc_unplug_requested(drc)) { 4157a03509cdSDaniel Henrique Barboza spapr_drc_unplug_request(drc); 4158bb2bdd81SGreg Kurz spapr_hotplug_req_remove_by_index(drc); 41597420033eSDaniel Henrique Barboza } else { 41607420033eSDaniel Henrique Barboza error_setg(errp, 41617420033eSDaniel Henrique Barboza "PCI Host Bridge unplug already in progress for device %s", 41627420033eSDaniel Henrique Barboza dev->id); 4163bb2bdd81SGreg Kurz } 4164bb2bdd81SGreg Kurz } 4165bb2bdd81SGreg Kurz 4166ac96807bSGreg Kurz static 4167ac96807bSGreg Kurz bool spapr_tpm_proxy_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev, 41680fb6bd07SMichael Roth Error **errp) 41690fb6bd07SMichael Roth { 41700fb6bd07SMichael Roth SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev)); 4171ac96807bSGreg Kurz 4172ac96807bSGreg Kurz if (spapr->tpm_proxy != NULL) { 4173ac96807bSGreg Kurz error_setg(errp, "Only one TPM proxy can be specified for this machine"); 4174ac96807bSGreg Kurz return false; 4175ac96807bSGreg Kurz } 4176ac96807bSGreg Kurz 4177ac96807bSGreg Kurz return true; 4178ac96807bSGreg Kurz } 4179ac96807bSGreg Kurz 4180ac96807bSGreg 
Kurz static void spapr_tpm_proxy_plug(HotplugHandler *hotplug_dev, DeviceState *dev) 4181ac96807bSGreg Kurz { 4182ac96807bSGreg Kurz SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev)); 41830fb6bd07SMichael Roth SpaprTpmProxy *tpm_proxy = SPAPR_TPM_PROXY(dev); 41840fb6bd07SMichael Roth 4185ac96807bSGreg Kurz /* Already checked in spapr_tpm_proxy_pre_plug() */ 4186ac96807bSGreg Kurz g_assert(spapr->tpm_proxy == NULL); 41870fb6bd07SMichael Roth 41880fb6bd07SMichael Roth spapr->tpm_proxy = tpm_proxy; 41890fb6bd07SMichael Roth } 41900fb6bd07SMichael Roth 41910fb6bd07SMichael Roth static void spapr_tpm_proxy_unplug(HotplugHandler *hotplug_dev, DeviceState *dev) 41920fb6bd07SMichael Roth { 41930fb6bd07SMichael Roth SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev)); 41940fb6bd07SMichael Roth 4195981c3dcdSMarkus Armbruster qdev_unrealize(dev); 41960fb6bd07SMichael Roth object_unparent(OBJECT(dev)); 41970fb6bd07SMichael Roth spapr->tpm_proxy = NULL; 41980fb6bd07SMichael Roth } 41990fb6bd07SMichael Roth 4200c20d332aSBharata B Rao static void spapr_machine_device_plug(HotplugHandler *hotplug_dev, 4201c20d332aSBharata B Rao DeviceState *dev, Error **errp) 4202c20d332aSBharata B Rao { 4203c20d332aSBharata B Rao if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { 4204ea042c53SGreg Kurz spapr_memory_plug(hotplug_dev, dev); 4205af81cf32SBharata B Rao } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) { 4206f9b43958SGreg Kurz spapr_core_plug(hotplug_dev, dev); 4207bb2bdd81SGreg Kurz } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) { 42089a070699SGreg Kurz spapr_phb_plug(hotplug_dev, dev); 42090fb6bd07SMichael Roth } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) { 4210ac96807bSGreg Kurz spapr_tpm_proxy_plug(hotplug_dev, dev); 4211c20d332aSBharata B Rao } 4212c20d332aSBharata B Rao } 4213c20d332aSBharata B Rao 421488432f44SDavid Hildenbrand static void spapr_machine_device_unplug(HotplugHandler 
*hotplug_dev, 421588432f44SDavid Hildenbrand DeviceState *dev, Error **errp) 421688432f44SDavid Hildenbrand { 42173ec71474SDavid Hildenbrand if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { 42183ec71474SDavid Hildenbrand spapr_memory_unplug(hotplug_dev, dev); 4219a4261be1SDavid Hildenbrand } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) { 4220a4261be1SDavid Hildenbrand spapr_core_unplug(hotplug_dev, dev); 4221bb2bdd81SGreg Kurz } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) { 4222bb2bdd81SGreg Kurz spapr_phb_unplug(hotplug_dev, dev); 42230fb6bd07SMichael Roth } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) { 42240fb6bd07SMichael Roth spapr_tpm_proxy_unplug(hotplug_dev, dev); 42253ec71474SDavid Hildenbrand } 422688432f44SDavid Hildenbrand } 422788432f44SDavid Hildenbrand 422873598c75SGreg Kurz bool spapr_memory_hot_unplug_supported(SpaprMachineState *spapr) 422973598c75SGreg Kurz { 423073598c75SGreg Kurz return spapr_ovec_test(spapr->ov5_cas, OV5_HP_EVT) || 423173598c75SGreg Kurz /* 423273598c75SGreg Kurz * CAS will process all pending unplug requests. 423373598c75SGreg Kurz * 423473598c75SGreg Kurz * HACK: a guest could theoretically have cleared all bits in OV5, 423573598c75SGreg Kurz * but none of the guests we care for do. 
423673598c75SGreg Kurz */ 423773598c75SGreg Kurz spapr_ovec_empty(spapr->ov5_cas); 423873598c75SGreg Kurz } 423973598c75SGreg Kurz 4240cf632463SBharata B Rao static void spapr_machine_device_unplug_request(HotplugHandler *hotplug_dev, 4241cf632463SBharata B Rao DeviceState *dev, Error **errp) 4242cf632463SBharata B Rao { 4243ce2918cbSDavid Gibson SpaprMachineState *sms = SPAPR_MACHINE(OBJECT(hotplug_dev)); 4244c86c1affSDaniel Henrique Barboza MachineClass *mc = MACHINE_GET_CLASS(sms); 4245ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 4246cf632463SBharata B Rao 4247cf632463SBharata B Rao if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { 424873598c75SGreg Kurz if (spapr_memory_hot_unplug_supported(sms)) { 4249cf632463SBharata B Rao spapr_memory_unplug_request(hotplug_dev, dev, errp); 4250cf632463SBharata B Rao } else { 4251cf632463SBharata B Rao error_setg(errp, "Memory hot unplug not supported for this guest"); 4252cf632463SBharata B Rao } 42536f4b5c3eSBharata B Rao } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) { 4254c5514d0eSIgor Mammedov if (!mc->has_hotpluggable_cpus) { 42556f4b5c3eSBharata B Rao error_setg(errp, "CPU hot unplug not supported on this machine"); 42566f4b5c3eSBharata B Rao return; 42576f4b5c3eSBharata B Rao } 4258115debf2SIgor Mammedov spapr_core_unplug_request(hotplug_dev, dev, errp); 4259bb2bdd81SGreg Kurz } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) { 4260bb2bdd81SGreg Kurz if (!smc->dr_phb_enabled) { 4261bb2bdd81SGreg Kurz error_setg(errp, "PHB hot unplug not supported on this machine"); 4262bb2bdd81SGreg Kurz return; 4263bb2bdd81SGreg Kurz } 4264bb2bdd81SGreg Kurz spapr_phb_unplug_request(hotplug_dev, dev, errp); 42650fb6bd07SMichael Roth } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) { 42660fb6bd07SMichael Roth spapr_tpm_proxy_unplug(hotplug_dev, dev); 4267c20d332aSBharata B Rao } 4268c20d332aSBharata B Rao } 4269c20d332aSBharata B Rao 
427094a94e4cSBharata B Rao static void spapr_machine_device_pre_plug(HotplugHandler *hotplug_dev, 427194a94e4cSBharata B Rao DeviceState *dev, Error **errp) 427294a94e4cSBharata B Rao { 4273c871bc70SLaurent Vivier if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { 4274c871bc70SLaurent Vivier spapr_memory_pre_plug(hotplug_dev, dev, errp); 4275c871bc70SLaurent Vivier } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) { 427694a94e4cSBharata B Rao spapr_core_pre_plug(hotplug_dev, dev, errp); 4277bb2bdd81SGreg Kurz } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) { 4278bb2bdd81SGreg Kurz spapr_phb_pre_plug(hotplug_dev, dev, errp); 4279ac96807bSGreg Kurz } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) { 4280ac96807bSGreg Kurz spapr_tpm_proxy_pre_plug(hotplug_dev, dev, errp); 428194a94e4cSBharata B Rao } 428294a94e4cSBharata B Rao } 428394a94e4cSBharata B Rao 42847ebaf795SBharata B Rao static HotplugHandler *spapr_get_hotplug_handler(MachineState *machine, 4285c20d332aSBharata B Rao DeviceState *dev) 4286c20d332aSBharata B Rao { 428794a94e4cSBharata B Rao if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM) || 4288bb2bdd81SGreg Kurz object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE) || 42890fb6bd07SMichael Roth object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE) || 42900fb6bd07SMichael Roth object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) { 4291c20d332aSBharata B Rao return HOTPLUG_HANDLER(machine); 4292c20d332aSBharata B Rao } 4293cb600087SDavid Gibson if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) { 4294cb600087SDavid Gibson PCIDevice *pcidev = PCI_DEVICE(dev); 4295cb600087SDavid Gibson PCIBus *root = pci_device_root_bus(pcidev); 4296cb600087SDavid Gibson SpaprPhbState *phb = 4297cb600087SDavid Gibson (SpaprPhbState *)object_dynamic_cast(OBJECT(BUS(root)->parent), 4298cb600087SDavid Gibson TYPE_SPAPR_PCI_HOST_BRIDGE); 4299cb600087SDavid Gibson 4300cb600087SDavid Gibson if (phb) { 
4301cb600087SDavid Gibson return HOTPLUG_HANDLER(phb); 4302cb600087SDavid Gibson } 4303cb600087SDavid Gibson } 4304c20d332aSBharata B Rao return NULL; 4305c20d332aSBharata B Rao } 4306c20d332aSBharata B Rao 4307ea089eebSIgor Mammedov static CpuInstanceProperties 4308ea089eebSIgor Mammedov spapr_cpu_index_to_props(MachineState *machine, unsigned cpu_index) 430920bb648dSDavid Gibson { 4310ea089eebSIgor Mammedov CPUArchId *core_slot; 4311ea089eebSIgor Mammedov MachineClass *mc = MACHINE_GET_CLASS(machine); 4312ea089eebSIgor Mammedov 4313ea089eebSIgor Mammedov /* make sure possible_cpu are intialized */ 4314ea089eebSIgor Mammedov mc->possible_cpu_arch_ids(machine); 4315ea089eebSIgor Mammedov /* get CPU core slot containing thread that matches cpu_index */ 4316ea089eebSIgor Mammedov core_slot = spapr_find_cpu_slot(machine, cpu_index, NULL); 4317ea089eebSIgor Mammedov assert(core_slot); 4318ea089eebSIgor Mammedov return core_slot->props; 431920bb648dSDavid Gibson } 432020bb648dSDavid Gibson 432179e07936SIgor Mammedov static int64_t spapr_get_default_cpu_node_id(const MachineState *ms, int idx) 432279e07936SIgor Mammedov { 4323aa570207STao Xu return idx / ms->smp.cores % ms->numa_state->num_nodes; 432479e07936SIgor Mammedov } 432579e07936SIgor Mammedov 4326535455fdSIgor Mammedov static const CPUArchIdList *spapr_possible_cpu_arch_ids(MachineState *machine) 4327535455fdSIgor Mammedov { 4328535455fdSIgor Mammedov int i; 4329fe6b6346SLike Xu unsigned int smp_threads = machine->smp.threads; 4330fe6b6346SLike Xu unsigned int smp_cpus = machine->smp.cpus; 4331d342eb76SIgor Mammedov const char *core_type; 4332fe6b6346SLike Xu int spapr_max_cores = machine->smp.max_cpus / smp_threads; 4333535455fdSIgor Mammedov MachineClass *mc = MACHINE_GET_CLASS(machine); 4334535455fdSIgor Mammedov 4335c5514d0eSIgor Mammedov if (!mc->has_hotpluggable_cpus) { 4336535455fdSIgor Mammedov spapr_max_cores = QEMU_ALIGN_UP(smp_cpus, smp_threads) / smp_threads; 4337535455fdSIgor Mammedov } 
4338535455fdSIgor Mammedov if (machine->possible_cpus) { 4339535455fdSIgor Mammedov assert(machine->possible_cpus->len == spapr_max_cores); 4340535455fdSIgor Mammedov return machine->possible_cpus; 4341535455fdSIgor Mammedov } 4342535455fdSIgor Mammedov 4343d342eb76SIgor Mammedov core_type = spapr_get_cpu_core_type(machine->cpu_type); 4344d342eb76SIgor Mammedov if (!core_type) { 4345d342eb76SIgor Mammedov error_report("Unable to find sPAPR CPU Core definition"); 4346d342eb76SIgor Mammedov exit(1); 4347d342eb76SIgor Mammedov } 4348d342eb76SIgor Mammedov 4349535455fdSIgor Mammedov machine->possible_cpus = g_malloc0(sizeof(CPUArchIdList) + 4350535455fdSIgor Mammedov sizeof(CPUArchId) * spapr_max_cores); 4351535455fdSIgor Mammedov machine->possible_cpus->len = spapr_max_cores; 4352535455fdSIgor Mammedov for (i = 0; i < machine->possible_cpus->len; i++) { 4353535455fdSIgor Mammedov int core_id = i * smp_threads; 4354535455fdSIgor Mammedov 4355d342eb76SIgor Mammedov machine->possible_cpus->cpus[i].type = core_type; 4356f2d672c2SIgor Mammedov machine->possible_cpus->cpus[i].vcpus_count = smp_threads; 4357535455fdSIgor Mammedov machine->possible_cpus->cpus[i].arch_id = core_id; 4358535455fdSIgor Mammedov machine->possible_cpus->cpus[i].props.has_core_id = true; 4359535455fdSIgor Mammedov machine->possible_cpus->cpus[i].props.core_id = core_id; 4360535455fdSIgor Mammedov } 4361535455fdSIgor Mammedov return machine->possible_cpus; 4362535455fdSIgor Mammedov } 4363535455fdSIgor Mammedov 4364f5598c92SGreg Kurz static bool spapr_phb_placement(SpaprMachineState *spapr, uint32_t index, 4365daa23699SDavid Gibson uint64_t *buid, hwaddr *pio, 4366daa23699SDavid Gibson hwaddr *mmio32, hwaddr *mmio64, 4367ec132efaSAlexey Kardashevskiy unsigned n_dma, uint32_t *liobns, 4368ec132efaSAlexey Kardashevskiy hwaddr *nv2gpa, hwaddr *nv2atsd, Error **errp) 43696737d9adSDavid Gibson { 4370357d1e3bSDavid Gibson /* 4371357d1e3bSDavid Gibson * New-style PHB window placement. 
4372357d1e3bSDavid Gibson * 4373357d1e3bSDavid Gibson * Goals: Gives large (1TiB), naturally aligned 64-bit MMIO window 4374357d1e3bSDavid Gibson * for each PHB, in addition to 2GiB 32-bit MMIO and 64kiB PIO 4375357d1e3bSDavid Gibson * windows. 4376357d1e3bSDavid Gibson * 4377357d1e3bSDavid Gibson * Some guest kernels can't work with MMIO windows above 1<<46 4378357d1e3bSDavid Gibson * (64TiB), so we place up to 31 PHBs in the area 32TiB..64TiB 4379357d1e3bSDavid Gibson * 4380357d1e3bSDavid Gibson * 32TiB..(33TiB+1984kiB) contains the 64kiB PIO windows for each 4381357d1e3bSDavid Gibson * PHB stacked together. (32TiB+2GiB)..(32TiB+64GiB) contains the 4382357d1e3bSDavid Gibson * 2GiB 32-bit MMIO windows for each PHB. Then 33..64TiB has the 4383357d1e3bSDavid Gibson * 1TiB 64-bit MMIO windows for each PHB. 4384357d1e3bSDavid Gibson */ 43856737d9adSDavid Gibson const uint64_t base_buid = 0x800000020000000ULL; 43866737d9adSDavid Gibson int i; 43876737d9adSDavid Gibson 4388357d1e3bSDavid Gibson /* Sanity check natural alignments */ 4389357d1e3bSDavid Gibson QEMU_BUILD_BUG_ON((SPAPR_PCI_BASE % SPAPR_PCI_MEM64_WIN_SIZE) != 0); 4390357d1e3bSDavid Gibson QEMU_BUILD_BUG_ON((SPAPR_PCI_LIMIT % SPAPR_PCI_MEM64_WIN_SIZE) != 0); 4391357d1e3bSDavid Gibson QEMU_BUILD_BUG_ON((SPAPR_PCI_MEM64_WIN_SIZE % SPAPR_PCI_MEM32_WIN_SIZE) != 0); 4392357d1e3bSDavid Gibson QEMU_BUILD_BUG_ON((SPAPR_PCI_MEM32_WIN_SIZE % SPAPR_PCI_IO_WIN_SIZE) != 0); 4393357d1e3bSDavid Gibson /* Sanity check bounds */ 439425e6a118SMichael S. Tsirkin QEMU_BUILD_BUG_ON((SPAPR_MAX_PHBS * SPAPR_PCI_IO_WIN_SIZE) > 439525e6a118SMichael S. Tsirkin SPAPR_PCI_MEM32_WIN_SIZE); 439625e6a118SMichael S. Tsirkin QEMU_BUILD_BUG_ON((SPAPR_MAX_PHBS * SPAPR_PCI_MEM32_WIN_SIZE) > 439725e6a118SMichael S. Tsirkin SPAPR_PCI_MEM64_WIN_SIZE); 43982efff1c0SDavid Gibson 439925e6a118SMichael S. Tsirkin if (index >= SPAPR_MAX_PHBS) { 440025e6a118SMichael S. 
Tsirkin error_setg(errp, "\"index\" for PAPR PHB is too large (max %llu)", 440125e6a118SMichael S. Tsirkin SPAPR_MAX_PHBS - 1); 4402f5598c92SGreg Kurz return false; 44036737d9adSDavid Gibson } 44046737d9adSDavid Gibson 44056737d9adSDavid Gibson *buid = base_buid + index; 44066737d9adSDavid Gibson for (i = 0; i < n_dma; ++i) { 44076737d9adSDavid Gibson liobns[i] = SPAPR_PCI_LIOBN(index, i); 44086737d9adSDavid Gibson } 44096737d9adSDavid Gibson 4410357d1e3bSDavid Gibson *pio = SPAPR_PCI_BASE + index * SPAPR_PCI_IO_WIN_SIZE; 4411357d1e3bSDavid Gibson *mmio32 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM32_WIN_SIZE; 4412357d1e3bSDavid Gibson *mmio64 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM64_WIN_SIZE; 4413ec132efaSAlexey Kardashevskiy 4414ec132efaSAlexey Kardashevskiy *nv2gpa = SPAPR_PCI_NV2RAM64_WIN_BASE + index * SPAPR_PCI_NV2RAM64_WIN_SIZE; 4415ec132efaSAlexey Kardashevskiy *nv2atsd = SPAPR_PCI_NV2ATSD_WIN_BASE + index * SPAPR_PCI_NV2ATSD_WIN_SIZE; 4416f5598c92SGreg Kurz return true; 44176737d9adSDavid Gibson } 44186737d9adSDavid Gibson 44197844e12bSCédric Le Goater static ICSState *spapr_ics_get(XICSFabric *dev, int irq) 44207844e12bSCédric Le Goater { 4421ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(dev); 44227844e12bSCédric Le Goater 44237844e12bSCédric Le Goater return ics_valid_irq(spapr->ics, irq) ? 
spapr->ics : NULL; 44247844e12bSCédric Le Goater } 44257844e12bSCédric Le Goater 44267844e12bSCédric Le Goater static void spapr_ics_resend(XICSFabric *dev) 44277844e12bSCédric Le Goater { 4428ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(dev); 44297844e12bSCédric Le Goater 44307844e12bSCédric Le Goater ics_resend(spapr->ics); 44317844e12bSCédric Le Goater } 44327844e12bSCédric Le Goater 443381210c20SSam Bobroff static ICPState *spapr_icp_get(XICSFabric *xi, int vcpu_id) 4434b2fc59aaSCédric Le Goater { 44352e886fb3SSam Bobroff PowerPCCPU *cpu = spapr_find_cpu(vcpu_id); 4436b2fc59aaSCédric Le Goater 4437a28b9a5aSCédric Le Goater return cpu ? spapr_cpu_state(cpu)->icp : NULL; 4438b2fc59aaSCédric Le Goater } 4439b2fc59aaSCédric Le Goater 44406449da45SCédric Le Goater static void spapr_pic_print_info(InterruptStatsProvider *obj, 44416449da45SCédric Le Goater Monitor *mon) 44426449da45SCédric Le Goater { 4443ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(obj); 44446449da45SCédric Le Goater 4445328d8eb2SDavid Gibson spapr_irq_print_info(spapr, mon); 4446f041d6afSGreg Kurz monitor_printf(mon, "irqchip: %s\n", 4447f041d6afSGreg Kurz kvm_irqchip_in_kernel() ? 
"in-kernel" : "emulated"); 44486449da45SCédric Le Goater } 44496449da45SCédric Le Goater 4450baa45b17SCédric Le Goater /* 4451baa45b17SCédric Le Goater * This is a XIVE only operation 4452baa45b17SCédric Le Goater */ 4453932de7aeSCédric Le Goater static int spapr_match_nvt(XiveFabric *xfb, uint8_t format, 4454932de7aeSCédric Le Goater uint8_t nvt_blk, uint32_t nvt_idx, 4455932de7aeSCédric Le Goater bool cam_ignore, uint8_t priority, 4456932de7aeSCédric Le Goater uint32_t logic_serv, XiveTCTXMatch *match) 4457932de7aeSCédric Le Goater { 4458932de7aeSCédric Le Goater SpaprMachineState *spapr = SPAPR_MACHINE(xfb); 4459baa45b17SCédric Le Goater XivePresenter *xptr = XIVE_PRESENTER(spapr->active_intc); 4460932de7aeSCédric Le Goater XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr); 4461932de7aeSCédric Le Goater int count; 4462932de7aeSCédric Le Goater 4463932de7aeSCédric Le Goater count = xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, cam_ignore, 4464932de7aeSCédric Le Goater priority, logic_serv, match); 4465932de7aeSCédric Le Goater if (count < 0) { 4466932de7aeSCédric Le Goater return count; 4467932de7aeSCédric Le Goater } 4468932de7aeSCédric Le Goater 4469932de7aeSCédric Le Goater /* 4470932de7aeSCédric Le Goater * When we implement the save and restore of the thread interrupt 4471932de7aeSCédric Le Goater * contexts in the enter/exit CPU handlers of the machine and the 4472932de7aeSCédric Le Goater * escalations in QEMU, we should be able to handle non dispatched 4473932de7aeSCédric Le Goater * vCPUs. 4474932de7aeSCédric Le Goater * 4475932de7aeSCédric Le Goater * Until this is done, the sPAPR machine should find at least one 4476932de7aeSCédric Le Goater * matching context always. 
4477932de7aeSCédric Le Goater */ 4478932de7aeSCédric Le Goater if (count == 0) { 4479932de7aeSCédric Le Goater qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVT %x/%x is not dispatched\n", 4480932de7aeSCédric Le Goater nvt_blk, nvt_idx); 4481932de7aeSCédric Le Goater } 4482932de7aeSCédric Le Goater 4483932de7aeSCédric Le Goater return count; 4484932de7aeSCédric Le Goater } 4485932de7aeSCédric Le Goater 448614bb4486SGreg Kurz int spapr_get_vcpu_id(PowerPCCPU *cpu) 44872e886fb3SSam Bobroff { 4488b1a568c1SGreg Kurz return cpu->vcpu_id; 44892e886fb3SSam Bobroff } 44902e886fb3SSam Bobroff 4491cfdc5274SGreg Kurz bool spapr_set_vcpu_id(PowerPCCPU *cpu, int cpu_index, Error **errp) 4492648edb64SGreg Kurz { 4493ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine()); 4494fe6b6346SLike Xu MachineState *ms = MACHINE(spapr); 4495648edb64SGreg Kurz int vcpu_id; 4496648edb64SGreg Kurz 44975d0fb150SGreg Kurz vcpu_id = spapr_vcpu_id(spapr, cpu_index); 4498648edb64SGreg Kurz 4499648edb64SGreg Kurz if (kvm_enabled() && !kvm_vcpu_id_is_valid(vcpu_id)) { 4500648edb64SGreg Kurz error_setg(errp, "Can't create CPU with id %d in KVM", vcpu_id); 4501648edb64SGreg Kurz error_append_hint(errp, "Adjust the number of cpus to %d " 4502648edb64SGreg Kurz "or try to raise the number of threads per core\n", 4503fe6b6346SLike Xu vcpu_id * ms->smp.threads / spapr->vsmt); 4504cfdc5274SGreg Kurz return false; 4505648edb64SGreg Kurz } 4506648edb64SGreg Kurz 4507648edb64SGreg Kurz cpu->vcpu_id = vcpu_id; 4508cfdc5274SGreg Kurz return true; 4509648edb64SGreg Kurz } 4510648edb64SGreg Kurz 45112e886fb3SSam Bobroff PowerPCCPU *spapr_find_cpu(int vcpu_id) 45122e886fb3SSam Bobroff { 45132e886fb3SSam Bobroff CPUState *cs; 45142e886fb3SSam Bobroff 45152e886fb3SSam Bobroff CPU_FOREACH(cs) { 45162e886fb3SSam Bobroff PowerPCCPU *cpu = POWERPC_CPU(cs); 45172e886fb3SSam Bobroff 451814bb4486SGreg Kurz if (spapr_get_vcpu_id(cpu) == vcpu_id) { 45192e886fb3SSam Bobroff return cpu; 45202e886fb3SSam 
Bobroff } 45212e886fb3SSam Bobroff } 45222e886fb3SSam Bobroff 45232e886fb3SSam Bobroff return NULL; 45242e886fb3SSam Bobroff } 45252e886fb3SSam Bobroff 45267cebc5dbSNicholas Piggin static bool spapr_cpu_in_nested(PowerPCCPU *cpu) 45277cebc5dbSNicholas Piggin { 4528120f738aSNicholas Piggin SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu); 4529120f738aSNicholas Piggin 4530120f738aSNicholas Piggin return spapr_cpu->in_nested; 45317cebc5dbSNicholas Piggin } 45327cebc5dbSNicholas Piggin 453303ef074cSNicholas Piggin static void spapr_cpu_exec_enter(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu) 453403ef074cSNicholas Piggin { 453503ef074cSNicholas Piggin SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu); 453603ef074cSNicholas Piggin 453703ef074cSNicholas Piggin /* These are only called by TCG, KVM maintains dispatch state */ 453803ef074cSNicholas Piggin 45393a6e6224SNicholas Piggin spapr_cpu->prod = false; 454003ef074cSNicholas Piggin if (spapr_cpu->vpa_addr) { 454103ef074cSNicholas Piggin CPUState *cs = CPU(cpu); 454203ef074cSNicholas Piggin uint32_t dispatch; 454303ef074cSNicholas Piggin 454403ef074cSNicholas Piggin dispatch = ldl_be_phys(cs->as, 454503ef074cSNicholas Piggin spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER); 454603ef074cSNicholas Piggin dispatch++; 454703ef074cSNicholas Piggin if ((dispatch & 1) != 0) { 454803ef074cSNicholas Piggin qemu_log_mask(LOG_GUEST_ERROR, 454903ef074cSNicholas Piggin "VPA: incorrect dispatch counter value for " 455003ef074cSNicholas Piggin "dispatched partition %u, correcting.\n", dispatch); 455103ef074cSNicholas Piggin dispatch++; 455203ef074cSNicholas Piggin } 455303ef074cSNicholas Piggin stl_be_phys(cs->as, 455403ef074cSNicholas Piggin spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER, dispatch); 455503ef074cSNicholas Piggin } 455603ef074cSNicholas Piggin } 455703ef074cSNicholas Piggin 455803ef074cSNicholas Piggin static void spapr_cpu_exec_exit(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu) 455903ef074cSNicholas Piggin { 456003ef074cSNicholas 
Piggin SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu); 456103ef074cSNicholas Piggin 456203ef074cSNicholas Piggin if (spapr_cpu->vpa_addr) { 456303ef074cSNicholas Piggin CPUState *cs = CPU(cpu); 456403ef074cSNicholas Piggin uint32_t dispatch; 456503ef074cSNicholas Piggin 456603ef074cSNicholas Piggin dispatch = ldl_be_phys(cs->as, 456703ef074cSNicholas Piggin spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER); 456803ef074cSNicholas Piggin dispatch++; 456903ef074cSNicholas Piggin if ((dispatch & 1) != 1) { 457003ef074cSNicholas Piggin qemu_log_mask(LOG_GUEST_ERROR, 457103ef074cSNicholas Piggin "VPA: incorrect dispatch counter value for " 457203ef074cSNicholas Piggin "preempted partition %u, correcting.\n", dispatch); 457303ef074cSNicholas Piggin dispatch++; 457403ef074cSNicholas Piggin } 457503ef074cSNicholas Piggin stl_be_phys(cs->as, 457603ef074cSNicholas Piggin spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER, dispatch); 457703ef074cSNicholas Piggin } 457803ef074cSNicholas Piggin } 457903ef074cSNicholas Piggin 458029ee3247SAlexey Kardashevskiy static void spapr_machine_class_init(ObjectClass *oc, void *data) 458153018216SPaolo Bonzini { 458229ee3247SAlexey Kardashevskiy MachineClass *mc = MACHINE_CLASS(oc); 4583ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(oc); 458471461b0fSAlexey Kardashevskiy FWPathProviderClass *fwc = FW_PATH_PROVIDER_CLASS(oc); 458534316482SAlexey Kardashevskiy NMIClass *nc = NMI_CLASS(oc); 4586c20d332aSBharata B Rao HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc); 45871d1be34dSDavid Gibson PPCVirtualHypervisorClass *vhc = PPC_VIRTUAL_HYPERVISOR_CLASS(oc); 45887844e12bSCédric Le Goater XICSFabricClass *xic = XICS_FABRIC_CLASS(oc); 45896449da45SCédric Le Goater InterruptStatsProviderClass *ispc = INTERRUPT_STATS_PROVIDER_CLASS(oc); 4590932de7aeSCédric Le Goater XiveFabricClass *xfc = XIVE_FABRIC_CLASS(oc); 4591fc8c745dSAlexey Kardashevskiy VofMachineIfClass *vmc = VOF_MACHINE_CLASS(oc); 459229ee3247SAlexey Kardashevskiy 
45930eb9054cSDavid Gibson mc->desc = "pSeries Logical Partition (PAPR compliant)"; 4594907aac2fSMark Cave-Ayland mc->ignore_boot_device_suffixes = true; 4595fc9f38c3SDavid Gibson 4596fc9f38c3SDavid Gibson /* 4597fc9f38c3SDavid Gibson * We set up the default / latest behaviour here. The class_init 4598fc9f38c3SDavid Gibson * functions for the specific versioned machine types can override 4599fc9f38c3SDavid Gibson * these details for backwards compatibility 4600fc9f38c3SDavid Gibson */ 4601bcb5ce08SDavid Gibson mc->init = spapr_machine_init; 4602bcb5ce08SDavid Gibson mc->reset = spapr_machine_reset; 4603958db90cSMarcel Apfelbaum mc->block_default_type = IF_SCSI; 46045642e451SDaniel Henrique Barboza 46055642e451SDaniel Henrique Barboza /* 46065642e451SDaniel Henrique Barboza * Setting max_cpus to INT32_MAX. Both KVM and TCG max_cpus values 46075642e451SDaniel Henrique Barboza * should be limited by the host capability instead of hardcoded. 46085642e451SDaniel Henrique Barboza * max_cpus for KVM guests will be checked in kvm_init(), and TCG 46095642e451SDaniel Henrique Barboza * guests are welcome to have as many CPUs as the host are capable 46105642e451SDaniel Henrique Barboza * of emulate. 
46115642e451SDaniel Henrique Barboza */ 46125642e451SDaniel Henrique Barboza mc->max_cpus = INT32_MAX; 46135642e451SDaniel Henrique Barboza 4614958db90cSMarcel Apfelbaum mc->no_parallel = 1; 46155b2128d2SAlexander Graf mc->default_boot_order = ""; 4616d23b6caaSPhilippe Mathieu-Daudé mc->default_ram_size = 512 * MiB; 4617ab74e543SIgor Mammedov mc->default_ram_id = "ppc_spapr.ram"; 461829f9cef3SSebastian Bauer mc->default_display = "std"; 4619958db90cSMarcel Apfelbaum mc->kvm_type = spapr_kvm_type; 46207da79a16SEduardo Habkost machine_class_allow_dynamic_sysbus_dev(mc, TYPE_SPAPR_PCI_HOST_BRIDGE); 4621e4024630SLaurent Vivier mc->pci_allow_0_address = true; 4622debbdc00SIgor Mammedov assert(!mc->get_hotplug_handler); 46237ebaf795SBharata B Rao mc->get_hotplug_handler = spapr_get_hotplug_handler; 462494a94e4cSBharata B Rao hc->pre_plug = spapr_machine_device_pre_plug; 4625c20d332aSBharata B Rao hc->plug = spapr_machine_device_plug; 4626ea089eebSIgor Mammedov mc->cpu_index_to_instance_props = spapr_cpu_index_to_props; 462779e07936SIgor Mammedov mc->get_default_cpu_node_id = spapr_get_default_cpu_node_id; 4628535455fdSIgor Mammedov mc->possible_cpu_arch_ids = spapr_possible_cpu_arch_ids; 4629cf632463SBharata B Rao hc->unplug_request = spapr_machine_device_unplug_request; 463088432f44SDavid Hildenbrand hc->unplug = spapr_machine_device_unplug; 463100b4fbe2SMarcel Apfelbaum 4632fc9f38c3SDavid Gibson smc->dr_lmb_enabled = true; 4633fea35ca4SAlexey Kardashevskiy smc->update_dt_enabled = true; 4634*277ee172SNicholas Piggin mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power9_v2.2"); 4635c5514d0eSIgor Mammedov mc->has_hotpluggable_cpus = true; 4636ee3a71e3SShivaprasad G Bhat mc->nvdimm_supported = true; 463752b81ab5SDavid Gibson smc->resize_hpt_default = SPAPR_RESIZE_HPT_ENABLED; 463871461b0fSAlexey Kardashevskiy fwc->get_dev_path = spapr_get_fw_dev_path; 463934316482SAlexey Kardashevskiy nc->nmi_monitor_handler = spapr_nmi; 46406737d9adSDavid Gibson smc->phb_placement = 
spapr_phb_placement; 46417cebc5dbSNicholas Piggin vhc->cpu_in_nested = spapr_cpu_in_nested; 4642120f738aSNicholas Piggin vhc->deliver_hv_excp = spapr_exit_nested; 46431d1be34dSDavid Gibson vhc->hypercall = emulate_spapr_hypercall; 4644e57ca75cSDavid Gibson vhc->hpt_mask = spapr_hpt_mask; 4645e57ca75cSDavid Gibson vhc->map_hptes = spapr_map_hptes; 4646e57ca75cSDavid Gibson vhc->unmap_hptes = spapr_unmap_hptes; 4647a2dd4e83SBenjamin Herrenschmidt vhc->hpte_set_c = spapr_hpte_set_c; 4648a2dd4e83SBenjamin Herrenschmidt vhc->hpte_set_r = spapr_hpte_set_r; 464979825f4dSBenjamin Herrenschmidt vhc->get_pate = spapr_get_pate; 46501ec26c75SGreg Kurz vhc->encode_hpt_for_kvm_pr = spapr_encode_hpt_for_kvm_pr; 465103ef074cSNicholas Piggin vhc->cpu_exec_enter = spapr_cpu_exec_enter; 465203ef074cSNicholas Piggin vhc->cpu_exec_exit = spapr_cpu_exec_exit; 46537844e12bSCédric Le Goater xic->ics_get = spapr_ics_get; 46547844e12bSCédric Le Goater xic->ics_resend = spapr_ics_resend; 4655b2fc59aaSCédric Le Goater xic->icp_get = spapr_icp_get; 46566449da45SCédric Le Goater ispc->print_info = spapr_pic_print_info; 465755641213SLaurent Vivier /* Force NUMA node memory size to be a multiple of 465855641213SLaurent Vivier * SPAPR_MEMORY_BLOCK_SIZE (256M) since that's the granularity 465955641213SLaurent Vivier * in which LMBs are represented and hot-added 466055641213SLaurent Vivier */ 466155641213SLaurent Vivier mc->numa_mem_align_shift = 28; 46620533ef5fSTao Xu mc->auto_enable_numa = true; 466333face6bSDavid Gibson 46644e5fe368SSuraj Jitindar Singh smc->default_caps.caps[SPAPR_CAP_HTM] = SPAPR_CAP_OFF; 46654e5fe368SSuraj Jitindar Singh smc->default_caps.caps[SPAPR_CAP_VSX] = SPAPR_CAP_ON; 46664e5fe368SSuraj Jitindar Singh smc->default_caps.caps[SPAPR_CAP_DFP] = SPAPR_CAP_ON; 46672782ad4cSSuraj Jitindar Singh smc->default_caps.caps[SPAPR_CAP_CFPC] = SPAPR_CAP_WORKAROUND; 46682782ad4cSSuraj Jitindar Singh smc->default_caps.caps[SPAPR_CAP_SBBC] = SPAPR_CAP_WORKAROUND; 46692782ad4cSSuraj 
Jitindar Singh smc->default_caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_WORKAROUND; 46702309832aSDavid Gibson smc->default_caps.caps[SPAPR_CAP_HPT_MAXPAGESIZE] = 16; /* 64kiB */ 4671b9a477b7SSuraj Jitindar Singh smc->default_caps.caps[SPAPR_CAP_NESTED_KVM_HV] = SPAPR_CAP_OFF; 4672edaa7995SSuraj Jitindar Singh smc->default_caps.caps[SPAPR_CAP_LARGE_DECREMENTER] = SPAPR_CAP_ON; 467337965dfeSDavid Gibson smc->default_caps.caps[SPAPR_CAP_CCF_ASSIST] = SPAPR_CAP_ON; 46748af7e1feSNicholas Piggin smc->default_caps.caps[SPAPR_CAP_FWNMI] = SPAPR_CAP_ON; 467582123b75SBharata B Rao smc->default_caps.caps[SPAPR_CAP_RPT_INVALIDATE] = SPAPR_CAP_OFF; 4676ccc5a4c5SNicholas Piggin 4677ccc5a4c5SNicholas Piggin /* 4678ccc5a4c5SNicholas Piggin * This cap specifies whether the AIL 3 mode for 4679ccc5a4c5SNicholas Piggin * H_SET_RESOURCE is supported. The default is modified 4680ccc5a4c5SNicholas Piggin * by default_caps_with_cpu(). 4681ccc5a4c5SNicholas Piggin */ 4682ccc5a4c5SNicholas Piggin smc->default_caps.caps[SPAPR_CAP_AIL_MODE_3] = SPAPR_CAP_ON; 468340c2281cSMarkus Armbruster spapr_caps_add_properties(smc); 4684bd94bc06SCédric Le Goater smc->irq = &spapr_irq_dual; 4685dae5e39aSMichael Roth smc->dr_phb_enabled = true; 46866c3829a2SAlexey Kardashevskiy smc->linux_pci_probe = true; 468729cb4187SGreg Kurz smc->smp_threads_vsmt = true; 468854255c1fSDavid Gibson smc->nr_xirqs = SPAPR_NR_XIRQS; 4689932de7aeSCédric Le Goater xfc->match_nvt = spapr_match_nvt; 4690fc8c745dSAlexey Kardashevskiy vmc->client_architecture_support = spapr_vof_client_architecture_support; 4691fc8c745dSAlexey Kardashevskiy vmc->quiesce = spapr_vof_quiesce; 4692fc8c745dSAlexey Kardashevskiy vmc->setprop = spapr_vof_setprop; 469353018216SPaolo Bonzini } 469453018216SPaolo Bonzini 469529ee3247SAlexey Kardashevskiy static const TypeInfo spapr_machine_info = { 469629ee3247SAlexey Kardashevskiy .name = TYPE_SPAPR_MACHINE, 469729ee3247SAlexey Kardashevskiy .parent = TYPE_MACHINE, 46984aee7362SDavid Gibson .abstract = true, 
4699ce2918cbSDavid Gibson .instance_size = sizeof(SpaprMachineState), 4700bcb5ce08SDavid Gibson .instance_init = spapr_instance_init, 470187bbdd9cSDavid Gibson .instance_finalize = spapr_machine_finalizefn, 4702ce2918cbSDavid Gibson .class_size = sizeof(SpaprMachineClass), 470329ee3247SAlexey Kardashevskiy .class_init = spapr_machine_class_init, 470471461b0fSAlexey Kardashevskiy .interfaces = (InterfaceInfo[]) { 470571461b0fSAlexey Kardashevskiy { TYPE_FW_PATH_PROVIDER }, 470634316482SAlexey Kardashevskiy { TYPE_NMI }, 4707c20d332aSBharata B Rao { TYPE_HOTPLUG_HANDLER }, 47081d1be34dSDavid Gibson { TYPE_PPC_VIRTUAL_HYPERVISOR }, 47097844e12bSCédric Le Goater { TYPE_XICS_FABRIC }, 47106449da45SCédric Le Goater { TYPE_INTERRUPT_STATS_PROVIDER }, 4711932de7aeSCédric Le Goater { TYPE_XIVE_FABRIC }, 4712fc8c745dSAlexey Kardashevskiy { TYPE_VOF_MACHINE_IF }, 471371461b0fSAlexey Kardashevskiy { } 471471461b0fSAlexey Kardashevskiy }, 471529ee3247SAlexey Kardashevskiy }; 471629ee3247SAlexey Kardashevskiy 4717a7849268SMichael S. Tsirkin static void spapr_machine_latest_class_options(MachineClass *mc) 4718a7849268SMichael S. Tsirkin { 4719a7849268SMichael S. Tsirkin mc->alias = "pseries"; 4720ea0ac7f6SPhilippe Mathieu-Daudé mc->is_default = true; 4721a7849268SMichael S. Tsirkin } 4722a7849268SMichael S. Tsirkin 4723fccbc785SDavid Gibson #define DEFINE_SPAPR_MACHINE(suffix, verstr, latest) \ 47245013c547SDavid Gibson static void spapr_machine_##suffix##_class_init(ObjectClass *oc, \ 47255013c547SDavid Gibson void *data) \ 47265013c547SDavid Gibson { \ 47275013c547SDavid Gibson MachineClass *mc = MACHINE_CLASS(oc); \ 47285013c547SDavid Gibson spapr_machine_##suffix##_class_options(mc); \ 4729fccbc785SDavid Gibson if (latest) { \ 4730a7849268SMichael S. 
Tsirkin spapr_machine_latest_class_options(mc); \ 4731fccbc785SDavid Gibson } \ 47325013c547SDavid Gibson } \ 47335013c547SDavid Gibson static const TypeInfo spapr_machine_##suffix##_info = { \ 47345013c547SDavid Gibson .name = MACHINE_TYPE_NAME("pseries-" verstr), \ 47355013c547SDavid Gibson .parent = TYPE_SPAPR_MACHINE, \ 47365013c547SDavid Gibson .class_init = spapr_machine_##suffix##_class_init, \ 47375013c547SDavid Gibson }; \ 47385013c547SDavid Gibson static void spapr_machine_register_##suffix(void) \ 47395013c547SDavid Gibson { \ 47405013c547SDavid Gibson type_register(&spapr_machine_##suffix##_info); \ 47415013c547SDavid Gibson } \ 47420e6aac87SEduardo Habkost type_init(spapr_machine_register_##suffix) 47435013c547SDavid Gibson 47441c5f29bbSDavid Gibson /* 4745f9be4771SCornelia Huck * pseries-8.1 47463eb74d20SCornelia Huck */ 4747f9be4771SCornelia Huck static void spapr_machine_8_1_class_options(MachineClass *mc) 47483eb74d20SCornelia Huck { 47493eb74d20SCornelia Huck /* Defaults for the latest behaviour inherited from the base class */ 47503eb74d20SCornelia Huck } 47513eb74d20SCornelia Huck 4752f9be4771SCornelia Huck DEFINE_SPAPR_MACHINE(8_1, "8.1", true); 4753f9be4771SCornelia Huck 4754f9be4771SCornelia Huck /* 4755f9be4771SCornelia Huck * pseries-8.0 4756f9be4771SCornelia Huck */ 4757f9be4771SCornelia Huck static void spapr_machine_8_0_class_options(MachineClass *mc) 4758f9be4771SCornelia Huck { 4759f9be4771SCornelia Huck spapr_machine_8_1_class_options(mc); 4760f9be4771SCornelia Huck compat_props_add(mc->compat_props, hw_compat_8_0, hw_compat_8_0_len); 4761f9be4771SCornelia Huck } 4762f9be4771SCornelia Huck 4763f9be4771SCornelia Huck DEFINE_SPAPR_MACHINE(8_0, "8.0", false); 4764db723c80SCornelia Huck 4765db723c80SCornelia Huck /* 4766db723c80SCornelia Huck * pseries-7.2 4767db723c80SCornelia Huck */ 4768db723c80SCornelia Huck static void spapr_machine_7_2_class_options(MachineClass *mc) 4769db723c80SCornelia Huck { 4770db723c80SCornelia Huck 
spapr_machine_8_0_class_options(mc); 4771db723c80SCornelia Huck compat_props_add(mc->compat_props, hw_compat_7_2, hw_compat_7_2_len); 4772db723c80SCornelia Huck } 4773db723c80SCornelia Huck 4774db723c80SCornelia Huck DEFINE_SPAPR_MACHINE(7_2, "7.2", false); 4775f514e147SCornelia Huck 4776f514e147SCornelia Huck /* 4777f514e147SCornelia Huck * pseries-7.1 4778f514e147SCornelia Huck */ 4779f514e147SCornelia Huck static void spapr_machine_7_1_class_options(MachineClass *mc) 4780f514e147SCornelia Huck { 4781f514e147SCornelia Huck spapr_machine_7_2_class_options(mc); 4782f514e147SCornelia Huck compat_props_add(mc->compat_props, hw_compat_7_1, hw_compat_7_1_len); 4783f514e147SCornelia Huck } 4784f514e147SCornelia Huck 4785f514e147SCornelia Huck DEFINE_SPAPR_MACHINE(7_1, "7.1", false); 47860ca70366SCornelia Huck 47870ca70366SCornelia Huck /* 47880ca70366SCornelia Huck * pseries-7.0 47890ca70366SCornelia Huck */ 47900ca70366SCornelia Huck static void spapr_machine_7_0_class_options(MachineClass *mc) 47910ca70366SCornelia Huck { 47920ca70366SCornelia Huck spapr_machine_7_1_class_options(mc); 47930ca70366SCornelia Huck compat_props_add(mc->compat_props, hw_compat_7_0, hw_compat_7_0_len); 47940ca70366SCornelia Huck } 47950ca70366SCornelia Huck 47960ca70366SCornelia Huck DEFINE_SPAPR_MACHINE(7_0, "7.0", false); 479701854af2SCornelia Huck 479801854af2SCornelia Huck /* 479901854af2SCornelia Huck * pseries-6.2 480001854af2SCornelia Huck */ 480101854af2SCornelia Huck static void spapr_machine_6_2_class_options(MachineClass *mc) 480201854af2SCornelia Huck { 480301854af2SCornelia Huck spapr_machine_7_0_class_options(mc); 480401854af2SCornelia Huck compat_props_add(mc->compat_props, hw_compat_6_2, hw_compat_6_2_len); 480501854af2SCornelia Huck } 480601854af2SCornelia Huck 480701854af2SCornelia Huck DEFINE_SPAPR_MACHINE(6_2, "6.2", false); 480852e64f5bSYanan Wang 480952e64f5bSYanan Wang /* 481052e64f5bSYanan Wang * pseries-6.1 481152e64f5bSYanan Wang */ 481252e64f5bSYanan Wang static 
void spapr_machine_6_1_class_options(MachineClass *mc) 481352e64f5bSYanan Wang { 4814e0eb84d4SDaniel Henrique Barboza SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 4815e0eb84d4SDaniel Henrique Barboza 481652e64f5bSYanan Wang spapr_machine_6_2_class_options(mc); 481752e64f5bSYanan Wang compat_props_add(mc->compat_props, hw_compat_6_1, hw_compat_6_1_len); 4818e0eb84d4SDaniel Henrique Barboza smc->pre_6_2_numa_affinity = true; 48192b526199SYanan Wang mc->smp_props.prefer_sockets = true; 482052e64f5bSYanan Wang } 482152e64f5bSYanan Wang 482252e64f5bSYanan Wang DEFINE_SPAPR_MACHINE(6_1, "6.1", false); 4823da7e13c0SCornelia Huck 4824da7e13c0SCornelia Huck /* 4825da7e13c0SCornelia Huck * pseries-6.0 4826da7e13c0SCornelia Huck */ 4827da7e13c0SCornelia Huck static void spapr_machine_6_0_class_options(MachineClass *mc) 4828da7e13c0SCornelia Huck { 4829da7e13c0SCornelia Huck spapr_machine_6_1_class_options(mc); 4830da7e13c0SCornelia Huck compat_props_add(mc->compat_props, hw_compat_6_0, hw_compat_6_0_len); 4831da7e13c0SCornelia Huck } 4832da7e13c0SCornelia Huck 4833da7e13c0SCornelia Huck DEFINE_SPAPR_MACHINE(6_0, "6.0", false); 4834576a00bdSCornelia Huck 4835576a00bdSCornelia Huck /* 4836576a00bdSCornelia Huck * pseries-5.2 4837576a00bdSCornelia Huck */ 4838576a00bdSCornelia Huck static void spapr_machine_5_2_class_options(MachineClass *mc) 4839576a00bdSCornelia Huck { 4840576a00bdSCornelia Huck spapr_machine_6_0_class_options(mc); 4841576a00bdSCornelia Huck compat_props_add(mc->compat_props, hw_compat_5_2, hw_compat_5_2_len); 4842576a00bdSCornelia Huck } 4843576a00bdSCornelia Huck 4844576a00bdSCornelia Huck DEFINE_SPAPR_MACHINE(5_2, "5.2", false); 48453ff3c5d3SCornelia Huck 48463ff3c5d3SCornelia Huck /* 48473ff3c5d3SCornelia Huck * pseries-5.1 48483ff3c5d3SCornelia Huck */ 48493ff3c5d3SCornelia Huck static void spapr_machine_5_1_class_options(MachineClass *mc) 48503ff3c5d3SCornelia Huck { 485129bfe52aSDaniel Henrique Barboza SpaprMachineClass *smc = 
SPAPR_MACHINE_CLASS(mc); 485229bfe52aSDaniel Henrique Barboza 48533ff3c5d3SCornelia Huck spapr_machine_5_2_class_options(mc); 48543ff3c5d3SCornelia Huck compat_props_add(mc->compat_props, hw_compat_5_1, hw_compat_5_1_len); 485529bfe52aSDaniel Henrique Barboza smc->pre_5_2_numa_associativity = true; 48563ff3c5d3SCornelia Huck } 48573ff3c5d3SCornelia Huck 48583ff3c5d3SCornelia Huck DEFINE_SPAPR_MACHINE(5_1, "5.1", false); 4859541aaa1dSCornelia Huck 4860541aaa1dSCornelia Huck /* 4861541aaa1dSCornelia Huck * pseries-5.0 4862541aaa1dSCornelia Huck */ 4863541aaa1dSCornelia Huck static void spapr_machine_5_0_class_options(MachineClass *mc) 4864541aaa1dSCornelia Huck { 4865a6030d7eSReza Arbab SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 4866a6030d7eSReza Arbab static GlobalProperty compat[] = { 4867a6030d7eSReza Arbab { TYPE_SPAPR_PCI_HOST_BRIDGE, "pre-5.1-associativity", "on" }, 4868a6030d7eSReza Arbab }; 4869a6030d7eSReza Arbab 4870541aaa1dSCornelia Huck spapr_machine_5_1_class_options(mc); 4871541aaa1dSCornelia Huck compat_props_add(mc->compat_props, hw_compat_5_0, hw_compat_5_0_len); 4872a6030d7eSReza Arbab compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); 487332a354dcSIgor Mammedov mc->numa_mem_supported = true; 4874a6030d7eSReza Arbab smc->pre_5_1_assoc_refpoints = true; 4875541aaa1dSCornelia Huck } 4876541aaa1dSCornelia Huck 4877541aaa1dSCornelia Huck DEFINE_SPAPR_MACHINE(5_0, "5.0", false); 48783eb74d20SCornelia Huck 48793eb74d20SCornelia Huck /* 48809aec2e52SCornelia Huck * pseries-4.2 4881e2676b16SGreg Kurz */ 48829aec2e52SCornelia Huck static void spapr_machine_4_2_class_options(MachineClass *mc) 4883e2676b16SGreg Kurz { 488437965dfeSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 488537965dfeSDavid Gibson 48863eb74d20SCornelia Huck spapr_machine_5_0_class_options(mc); 48875f258577SEvgeny Yakovlev compat_props_add(mc->compat_props, hw_compat_4_2, hw_compat_4_2_len); 488837965dfeSDavid Gibson 
smc->default_caps.caps[SPAPR_CAP_CCF_ASSIST] = SPAPR_CAP_OFF; 48898af7e1feSNicholas Piggin smc->default_caps.caps[SPAPR_CAP_FWNMI] = SPAPR_CAP_OFF; 48901052ab67SDavid Gibson smc->rma_limit = 16 * GiB; 4891ee3a71e3SShivaprasad G Bhat mc->nvdimm_supported = false; 4892e2676b16SGreg Kurz } 4893e2676b16SGreg Kurz 48943eb74d20SCornelia Huck DEFINE_SPAPR_MACHINE(4_2, "4.2", false); 48959aec2e52SCornelia Huck 48969aec2e52SCornelia Huck /* 48979aec2e52SCornelia Huck * pseries-4.1 48989aec2e52SCornelia Huck */ 48999aec2e52SCornelia Huck static void spapr_machine_4_1_class_options(MachineClass *mc) 49009aec2e52SCornelia Huck { 49016c3829a2SAlexey Kardashevskiy SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 4902d15d4ad6SDavid Gibson static GlobalProperty compat[] = { 4903d15d4ad6SDavid Gibson /* Only allow 4kiB and 64kiB IOMMU pagesizes */ 4904d15d4ad6SDavid Gibson { TYPE_SPAPR_PCI_HOST_BRIDGE, "pgsz", "0x11000" }, 4905d15d4ad6SDavid Gibson }; 4906d15d4ad6SDavid Gibson 49079aec2e52SCornelia Huck spapr_machine_4_2_class_options(mc); 49086c3829a2SAlexey Kardashevskiy smc->linux_pci_probe = false; 490929cb4187SGreg Kurz smc->smp_threads_vsmt = false; 49109aec2e52SCornelia Huck compat_props_add(mc->compat_props, hw_compat_4_1, hw_compat_4_1_len); 4911d15d4ad6SDavid Gibson compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); 49129aec2e52SCornelia Huck } 49139aec2e52SCornelia Huck 49149aec2e52SCornelia Huck DEFINE_SPAPR_MACHINE(4_1, "4.1", false); 49159bf2650bSCornelia Huck 49169bf2650bSCornelia Huck /* 49179bf2650bSCornelia Huck * pseries-4.0 49189bf2650bSCornelia Huck */ 4919f5598c92SGreg Kurz static bool phb_placement_4_0(SpaprMachineState *spapr, uint32_t index, 4920ec132efaSAlexey Kardashevskiy uint64_t *buid, hwaddr *pio, 4921ec132efaSAlexey Kardashevskiy hwaddr *mmio32, hwaddr *mmio64, 4922ec132efaSAlexey Kardashevskiy unsigned n_dma, uint32_t *liobns, 4923ec132efaSAlexey Kardashevskiy hwaddr *nv2gpa, hwaddr *nv2atsd, Error **errp) 4924ec132efaSAlexey 
Kardashevskiy { 4925f5598c92SGreg Kurz if (!spapr_phb_placement(spapr, index, buid, pio, mmio32, mmio64, n_dma, 4926f5598c92SGreg Kurz liobns, nv2gpa, nv2atsd, errp)) { 4927f5598c92SGreg Kurz return false; 4928ec132efaSAlexey Kardashevskiy } 4929ec132efaSAlexey Kardashevskiy 4930f5598c92SGreg Kurz *nv2gpa = 0; 4931f5598c92SGreg Kurz *nv2atsd = 0; 4932f5598c92SGreg Kurz return true; 4933f5598c92SGreg Kurz } 4934eb3cba82SDavid Gibson static void spapr_machine_4_0_class_options(MachineClass *mc) 4935eb3cba82SDavid Gibson { 4936eb3cba82SDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 4937eb3cba82SDavid Gibson 4938eb3cba82SDavid Gibson spapr_machine_4_1_class_options(mc); 4939eb3cba82SDavid Gibson compat_props_add(mc->compat_props, hw_compat_4_0, hw_compat_4_0_len); 4940eb3cba82SDavid Gibson smc->phb_placement = phb_placement_4_0; 4941bd94bc06SCédric Le Goater smc->irq = &spapr_irq_xics; 49423725ef1aSGreg Kurz smc->pre_4_1_migration = true; 4943eb3cba82SDavid Gibson } 4944eb3cba82SDavid Gibson 4945eb3cba82SDavid Gibson DEFINE_SPAPR_MACHINE(4_0, "4.0", false); 4946eb3cba82SDavid Gibson 4947eb3cba82SDavid Gibson /* 4948eb3cba82SDavid Gibson * pseries-3.1 4949eb3cba82SDavid Gibson */ 495088cbe073SMarc-André Lureau static void spapr_machine_3_1_class_options(MachineClass *mc) 495188cbe073SMarc-André Lureau { 4952ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 4953fea35ca4SAlexey Kardashevskiy 495484e060bfSAlex Williamson spapr_machine_4_0_class_options(mc); 4955abd93cc7SMarc-André Lureau compat_props_add(mc->compat_props, hw_compat_3_1, hw_compat_3_1_len); 495627461d69SPrasad J Pandit 495734a6b015SCédric Le Goater mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power8_v2.0"); 4958fea35ca4SAlexey Kardashevskiy smc->update_dt_enabled = false; 4959dae5e39aSMichael Roth smc->dr_phb_enabled = false; 49600a794529SDavid Gibson smc->broken_host_serial_model = true; 49612782ad4cSSuraj Jitindar Singh smc->default_caps.caps[SPAPR_CAP_CFPC] = 
SPAPR_CAP_BROKEN; 49622782ad4cSSuraj Jitindar Singh smc->default_caps.caps[SPAPR_CAP_SBBC] = SPAPR_CAP_BROKEN; 49632782ad4cSSuraj Jitindar Singh smc->default_caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_BROKEN; 4964edaa7995SSuraj Jitindar Singh smc->default_caps.caps[SPAPR_CAP_LARGE_DECREMENTER] = SPAPR_CAP_OFF; 496584e060bfSAlex Williamson } 496684e060bfSAlex Williamson 496784e060bfSAlex Williamson DEFINE_SPAPR_MACHINE(3_1, "3.1", false); 4968d45360d9SCédric Le Goater 4969d45360d9SCédric Le Goater /* 4970d45360d9SCédric Le Goater * pseries-3.0 4971d45360d9SCédric Le Goater */ 4972d45360d9SCédric Le Goater 4973d45360d9SCédric Le Goater static void spapr_machine_3_0_class_options(MachineClass *mc) 4974d45360d9SCédric Le Goater { 4975ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 497682cffa2eSCédric Le Goater 4977d45360d9SCédric Le Goater spapr_machine_3_1_class_options(mc); 4978ddb3235dSMarc-André Lureau compat_props_add(mc->compat_props, hw_compat_3_0, hw_compat_3_0_len); 497982cffa2eSCédric Le Goater 498082cffa2eSCédric Le Goater smc->legacy_irq_allocation = true; 498154255c1fSDavid Gibson smc->nr_xirqs = 0x400; 4982ae837402SCédric Le Goater smc->irq = &spapr_irq_xics_legacy; 4983d45360d9SCédric Le Goater } 4984d45360d9SCédric Le Goater 4985d45360d9SCédric Le Goater DEFINE_SPAPR_MACHINE(3_0, "3.0", false); 49868a4fd427SDavid Gibson 49878a4fd427SDavid Gibson /* 49888a4fd427SDavid Gibson * pseries-2.12 49898a4fd427SDavid Gibson */ 499088cbe073SMarc-André Lureau static void spapr_machine_2_12_class_options(MachineClass *mc) 499188cbe073SMarc-André Lureau { 4992ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 499388cbe073SMarc-André Lureau static GlobalProperty compat[] = { 49946c36bddfSEduardo Habkost { TYPE_POWERPC_CPU, "pre-3.0-migration", "on" }, 49956c36bddfSEduardo Habkost { TYPE_SPAPR_CPU_CORE, "pre-3.0-migration", "on" }, 4996fa386d98SMarc-André Lureau }; 49978a4fd427SDavid Gibson 4998d8c0c7afSPeter Maydell 
spapr_machine_3_0_class_options(mc); 49990d47310bSMarc-André Lureau compat_props_add(mc->compat_props, hw_compat_2_12, hw_compat_2_12_len); 500088cbe073SMarc-André Lureau compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); 50012309832aSDavid Gibson 5002e8937295SGreg Kurz /* We depend on kvm_enabled() to choose a default value for the 5003e8937295SGreg Kurz * hpt-max-page-size capability. Of course we can't do it here 5004e8937295SGreg Kurz * because this is too early and the HW accelerator isn't initialzed 5005e8937295SGreg Kurz * yet. Postpone this to machine init (see default_caps_with_cpu()). 5006e8937295SGreg Kurz */ 5007e8937295SGreg Kurz smc->default_caps.caps[SPAPR_CAP_HPT_MAXPAGESIZE] = 0; 50088a4fd427SDavid Gibson } 50098a4fd427SDavid Gibson 50108a4fd427SDavid Gibson DEFINE_SPAPR_MACHINE(2_12, "2.12", false); 50112b615412SDavid Gibson 5012813f3cf6SSuraj Jitindar Singh static void spapr_machine_2_12_sxxm_class_options(MachineClass *mc) 5013813f3cf6SSuraj Jitindar Singh { 5014ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 5015813f3cf6SSuraj Jitindar Singh 5016813f3cf6SSuraj Jitindar Singh spapr_machine_2_12_class_options(mc); 5017813f3cf6SSuraj Jitindar Singh smc->default_caps.caps[SPAPR_CAP_CFPC] = SPAPR_CAP_WORKAROUND; 5018813f3cf6SSuraj Jitindar Singh smc->default_caps.caps[SPAPR_CAP_SBBC] = SPAPR_CAP_WORKAROUND; 5019813f3cf6SSuraj Jitindar Singh smc->default_caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_FIXED_CCD; 5020813f3cf6SSuraj Jitindar Singh } 5021813f3cf6SSuraj Jitindar Singh 5022813f3cf6SSuraj Jitindar Singh DEFINE_SPAPR_MACHINE(2_12_sxxm, "2.12-sxxm", false); 5023813f3cf6SSuraj Jitindar Singh 50242b615412SDavid Gibson /* 50252b615412SDavid Gibson * pseries-2.11 50262b615412SDavid Gibson */ 50272b615412SDavid Gibson 50282b615412SDavid Gibson static void spapr_machine_2_11_class_options(MachineClass *mc) 50292b615412SDavid Gibson { 5030ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 
5031ee76a09fSDavid Gibson 50322b615412SDavid Gibson spapr_machine_2_12_class_options(mc); 50334e5fe368SSuraj Jitindar Singh smc->default_caps.caps[SPAPR_CAP_HTM] = SPAPR_CAP_ON; 503443df70a9SMarc-André Lureau compat_props_add(mc->compat_props, hw_compat_2_11, hw_compat_2_11_len); 50352b615412SDavid Gibson } 50362b615412SDavid Gibson 50372b615412SDavid Gibson DEFINE_SPAPR_MACHINE(2_11, "2.11", false); 5038e2676b16SGreg Kurz 5039e2676b16SGreg Kurz /* 50403fa14fbeSDavid Gibson * pseries-2.10 5041db800b21SDavid Gibson */ 5042e2676b16SGreg Kurz 50433fa14fbeSDavid Gibson static void spapr_machine_2_10_class_options(MachineClass *mc) 5044db800b21SDavid Gibson { 5045e2676b16SGreg Kurz spapr_machine_2_11_class_options(mc); 5046503224f4SMarc-André Lureau compat_props_add(mc->compat_props, hw_compat_2_10, hw_compat_2_10_len); 5047db800b21SDavid Gibson } 5048db800b21SDavid Gibson 5049e2676b16SGreg Kurz DEFINE_SPAPR_MACHINE(2_10, "2.10", false); 50503fa14fbeSDavid Gibson 50513fa14fbeSDavid Gibson /* 50523fa14fbeSDavid Gibson * pseries-2.9 50533fa14fbeSDavid Gibson */ 505488cbe073SMarc-André Lureau 505588cbe073SMarc-André Lureau static void spapr_machine_2_9_class_options(MachineClass *mc) 505688cbe073SMarc-André Lureau { 5057ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 505888cbe073SMarc-André Lureau static GlobalProperty compat[] = { 50596c36bddfSEduardo Habkost { TYPE_POWERPC_CPU, "pre-2.10-migration", "on" }, 5060fa386d98SMarc-André Lureau }; 50613fa14fbeSDavid Gibson 50623fa14fbeSDavid Gibson spapr_machine_2_10_class_options(mc); 50633e803152SMarc-André Lureau compat_props_add(mc->compat_props, hw_compat_2_9, hw_compat_2_9_len); 506488cbe073SMarc-André Lureau compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); 506546f7afa3SGreg Kurz smc->pre_2_10_has_unused_icps = true; 506652b81ab5SDavid Gibson smc->resize_hpt_default = SPAPR_RESIZE_HPT_DISABLED; 50673fa14fbeSDavid Gibson } 50683fa14fbeSDavid Gibson 50693fa14fbeSDavid Gibson 
DEFINE_SPAPR_MACHINE(2_9, "2.9", false); 5070fa325e6cSDavid Gibson 5071fa325e6cSDavid Gibson /* 5072fa325e6cSDavid Gibson * pseries-2.8 5073fa325e6cSDavid Gibson */ 507488cbe073SMarc-André Lureau 507588cbe073SMarc-André Lureau static void spapr_machine_2_8_class_options(MachineClass *mc) 507688cbe073SMarc-André Lureau { 507788cbe073SMarc-André Lureau static GlobalProperty compat[] = { 50786c36bddfSEduardo Habkost { TYPE_SPAPR_PCI_HOST_BRIDGE, "pcie-extended-configuration-space", "off" }, 5079fa386d98SMarc-André Lureau }; 5080fa325e6cSDavid Gibson 5081fa325e6cSDavid Gibson spapr_machine_2_9_class_options(mc); 5082edc24ccdSMarc-André Lureau compat_props_add(mc->compat_props, hw_compat_2_8, hw_compat_2_8_len); 508388cbe073SMarc-André Lureau compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); 508455641213SLaurent Vivier mc->numa_mem_align_shift = 23; 5085fa325e6cSDavid Gibson } 5086fa325e6cSDavid Gibson 5087fa325e6cSDavid Gibson DEFINE_SPAPR_MACHINE(2_8, "2.8", false); 5088db800b21SDavid Gibson 5089db800b21SDavid Gibson /* 50901ea1eefcSBharata B Rao * pseries-2.7 50911ea1eefcSBharata B Rao */ 5092357d1e3bSDavid Gibson 5093f5598c92SGreg Kurz static bool phb_placement_2_7(SpaprMachineState *spapr, uint32_t index, 5094357d1e3bSDavid Gibson uint64_t *buid, hwaddr *pio, 5095357d1e3bSDavid Gibson hwaddr *mmio32, hwaddr *mmio64, 5096ec132efaSAlexey Kardashevskiy unsigned n_dma, uint32_t *liobns, 5097ec132efaSAlexey Kardashevskiy hwaddr *nv2gpa, hwaddr *nv2atsd, Error **errp) 5098357d1e3bSDavid Gibson { 5099357d1e3bSDavid Gibson /* Legacy PHB placement for pseries-2.7 and earlier machine types */ 5100357d1e3bSDavid Gibson const uint64_t base_buid = 0x800000020000000ULL; 5101357d1e3bSDavid Gibson const hwaddr phb_spacing = 0x1000000000ULL; /* 64 GiB */ 5102357d1e3bSDavid Gibson const hwaddr mmio_offset = 0xa0000000; /* 2 GiB + 512 MiB */ 5103357d1e3bSDavid Gibson const hwaddr pio_offset = 0x80000000; /* 2 GiB */ 5104357d1e3bSDavid Gibson const uint32_t max_index = 
255; 5105357d1e3bSDavid Gibson const hwaddr phb0_alignment = 0x10000000000ULL; /* 1 TiB */ 5106357d1e3bSDavid Gibson 5107357d1e3bSDavid Gibson uint64_t ram_top = MACHINE(spapr)->ram_size; 5108357d1e3bSDavid Gibson hwaddr phb0_base, phb_base; 5109357d1e3bSDavid Gibson int i; 5110357d1e3bSDavid Gibson 51110c9269a5SDavid Hildenbrand /* Do we have device memory? */ 5112357d1e3bSDavid Gibson if (MACHINE(spapr)->maxram_size > ram_top) { 5113357d1e3bSDavid Gibson /* Can't just use maxram_size, because there may be an 51140c9269a5SDavid Hildenbrand * alignment gap between normal and device memory regions 51150c9269a5SDavid Hildenbrand */ 5116b0c14ec4SDavid Hildenbrand ram_top = MACHINE(spapr)->device_memory->base + 5117b0c14ec4SDavid Hildenbrand memory_region_size(&MACHINE(spapr)->device_memory->mr); 5118357d1e3bSDavid Gibson } 5119357d1e3bSDavid Gibson 5120357d1e3bSDavid Gibson phb0_base = QEMU_ALIGN_UP(ram_top, phb0_alignment); 5121357d1e3bSDavid Gibson 5122357d1e3bSDavid Gibson if (index > max_index) { 5123357d1e3bSDavid Gibson error_setg(errp, "\"index\" for PAPR PHB is too large (max %u)", 5124357d1e3bSDavid Gibson max_index); 5125f5598c92SGreg Kurz return false; 5126357d1e3bSDavid Gibson } 5127357d1e3bSDavid Gibson 5128357d1e3bSDavid Gibson *buid = base_buid + index; 5129357d1e3bSDavid Gibson for (i = 0; i < n_dma; ++i) { 5130357d1e3bSDavid Gibson liobns[i] = SPAPR_PCI_LIOBN(index, i); 5131357d1e3bSDavid Gibson } 5132357d1e3bSDavid Gibson 5133357d1e3bSDavid Gibson phb_base = phb0_base + index * phb_spacing; 5134357d1e3bSDavid Gibson *pio = phb_base + pio_offset; 5135357d1e3bSDavid Gibson *mmio32 = phb_base + mmio_offset; 5136357d1e3bSDavid Gibson /* 5137357d1e3bSDavid Gibson * We don't set the 64-bit MMIO window, relying on the PHB's 5138357d1e3bSDavid Gibson * fallback behaviour of automatically splitting a large "32-bit" 5139357d1e3bSDavid Gibson * window into contiguous 32-bit and 64-bit windows 5140357d1e3bSDavid Gibson */ 5141ec132efaSAlexey Kardashevskiy 
5142ec132efaSAlexey Kardashevskiy *nv2gpa = 0; 5143ec132efaSAlexey Kardashevskiy *nv2atsd = 0; 5144f5598c92SGreg Kurz return true; 5145357d1e3bSDavid Gibson } 5146db800b21SDavid Gibson 51471ea1eefcSBharata B Rao static void spapr_machine_2_7_class_options(MachineClass *mc) 51481ea1eefcSBharata B Rao { 5149ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 515088cbe073SMarc-André Lureau static GlobalProperty compat[] = { 51516c36bddfSEduardo Habkost { TYPE_SPAPR_PCI_HOST_BRIDGE, "mem_win_size", "0xf80000000", }, 51526c36bddfSEduardo Habkost { TYPE_SPAPR_PCI_HOST_BRIDGE, "mem64_win_size", "0", }, 51536c36bddfSEduardo Habkost { TYPE_POWERPC_CPU, "pre-2.8-migration", "on", }, 51546c36bddfSEduardo Habkost { TYPE_SPAPR_PCI_HOST_BRIDGE, "pre-2.8-migration", "on", }, 515588cbe073SMarc-André Lureau }; 51563daa4a9fSThomas Huth 5157db800b21SDavid Gibson spapr_machine_2_8_class_options(mc); 51582e9c10ebSIgor Mammedov mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power7_v2.3"); 5159a140c199SEduardo Habkost mc->default_machine_opts = "modern-hotplug-events=off"; 51605a995064SMarc-André Lureau compat_props_add(mc->compat_props, hw_compat_2_7, hw_compat_2_7_len); 516188cbe073SMarc-André Lureau compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); 5162357d1e3bSDavid Gibson smc->phb_placement = phb_placement_2_7; 51631ea1eefcSBharata B Rao } 51641ea1eefcSBharata B Rao 5165db800b21SDavid Gibson DEFINE_SPAPR_MACHINE(2_7, "2.7", false); 51661ea1eefcSBharata B Rao 51671ea1eefcSBharata B Rao /* 51684b23699cSDavid Gibson * pseries-2.6 51694b23699cSDavid Gibson */ 517088cbe073SMarc-André Lureau 517188cbe073SMarc-André Lureau static void spapr_machine_2_6_class_options(MachineClass *mc) 517288cbe073SMarc-André Lureau { 517388cbe073SMarc-André Lureau static GlobalProperty compat[] = { 51746c36bddfSEduardo Habkost { TYPE_SPAPR_PCI_HOST_BRIDGE, "ddw", "off" }, 5175fa386d98SMarc-André Lureau }; 51761ea1eefcSBharata B Rao 51771ea1eefcSBharata B Rao 
spapr_machine_2_7_class_options(mc); 5178c5514d0eSIgor Mammedov mc->has_hotpluggable_cpus = false; 5179ff8f261fSMarc-André Lureau compat_props_add(mc->compat_props, hw_compat_2_6, hw_compat_2_6_len); 518088cbe073SMarc-André Lureau compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); 51814b23699cSDavid Gibson } 51824b23699cSDavid Gibson 51831ea1eefcSBharata B Rao DEFINE_SPAPR_MACHINE(2_6, "2.6", false); 51844b23699cSDavid Gibson 51854b23699cSDavid Gibson /* 51861c5f29bbSDavid Gibson * pseries-2.5 51871c5f29bbSDavid Gibson */ 518888cbe073SMarc-André Lureau 518988cbe073SMarc-André Lureau static void spapr_machine_2_5_class_options(MachineClass *mc) 519088cbe073SMarc-André Lureau { 5191ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 519288cbe073SMarc-André Lureau static GlobalProperty compat[] = { 51936c36bddfSEduardo Habkost { "spapr-vlan", "use-rx-buffer-pools", "off" }, 5194fa386d98SMarc-André Lureau }; 51954b23699cSDavid Gibson 51964b23699cSDavid Gibson spapr_machine_2_6_class_options(mc); 519757040d45SThomas Huth smc->use_ohci_by_default = true; 5198fe759610SMarc-André Lureau compat_props_add(mc->compat_props, hw_compat_2_5, hw_compat_2_5_len); 519988cbe073SMarc-André Lureau compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); 52001c5f29bbSDavid Gibson } 52011c5f29bbSDavid Gibson 52024b23699cSDavid Gibson DEFINE_SPAPR_MACHINE(2_5, "2.5", false); 52031c5f29bbSDavid Gibson 52041c5f29bbSDavid Gibson /* 52051c5f29bbSDavid Gibson * pseries-2.4 52061c5f29bbSDavid Gibson */ 520780fd50f9SCornelia Huck 52085013c547SDavid Gibson static void spapr_machine_2_4_class_options(MachineClass *mc) 52095013c547SDavid Gibson { 5210ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 5211fc9f38c3SDavid Gibson 5212fc9f38c3SDavid Gibson spapr_machine_2_5_class_options(mc); 5213fc9f38c3SDavid Gibson smc->dr_lmb_enabled = false; 52142f99b9c2SMarc-André Lureau compat_props_add(mc->compat_props, hw_compat_2_4, 
hw_compat_2_4_len); 52151c5f29bbSDavid Gibson } 52161c5f29bbSDavid Gibson 5217fccbc785SDavid Gibson DEFINE_SPAPR_MACHINE(2_4, "2.4", false); 52181c5f29bbSDavid Gibson 52191c5f29bbSDavid Gibson /* 52201c5f29bbSDavid Gibson * pseries-2.3 52211c5f29bbSDavid Gibson */ 522288cbe073SMarc-André Lureau 522388cbe073SMarc-André Lureau static void spapr_machine_2_3_class_options(MachineClass *mc) 522488cbe073SMarc-André Lureau { 522588cbe073SMarc-André Lureau static GlobalProperty compat[] = { 52266c36bddfSEduardo Habkost { "spapr-pci-host-bridge", "dynamic-reconfiguration", "off" }, 5227fa386d98SMarc-André Lureau }; 5228fc9f38c3SDavid Gibson spapr_machine_2_4_class_options(mc); 52298995dd90SMarc-André Lureau compat_props_add(mc->compat_props, hw_compat_2_3, hw_compat_2_3_len); 523088cbe073SMarc-André Lureau compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); 52311c5f29bbSDavid Gibson } 5232fccbc785SDavid Gibson DEFINE_SPAPR_MACHINE(2_3, "2.3", false); 52331c5f29bbSDavid Gibson 52341c5f29bbSDavid Gibson /* 52351c5f29bbSDavid Gibson * pseries-2.2 52361c5f29bbSDavid Gibson */ 523788cbe073SMarc-André Lureau 523888cbe073SMarc-André Lureau static void spapr_machine_2_2_class_options(MachineClass *mc) 523988cbe073SMarc-André Lureau { 524088cbe073SMarc-André Lureau static GlobalProperty compat[] = { 52416c36bddfSEduardo Habkost { TYPE_SPAPR_PCI_HOST_BRIDGE, "mem_win_size", "0x20000000" }, 5242fa386d98SMarc-André Lureau }; 5243b194df47SAlexey Kardashevskiy 5244fc9f38c3SDavid Gibson spapr_machine_2_3_class_options(mc); 52451c30044eSMarc-André Lureau compat_props_add(mc->compat_props, hw_compat_2_2, hw_compat_2_2_len); 524688cbe073SMarc-André Lureau compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); 5247f6d0656bSEduardo Habkost mc->default_machine_opts = "modern-hotplug-events=off,suppress-vmdesc=on"; 52481c5f29bbSDavid Gibson } 5249fccbc785SDavid Gibson DEFINE_SPAPR_MACHINE(2_2, "2.2", false); 52501c5f29bbSDavid Gibson 52511c5f29bbSDavid Gibson /* 
52521c5f29bbSDavid Gibson * pseries-2.1 52531c5f29bbSDavid Gibson */ 52541c5f29bbSDavid Gibson 52555013c547SDavid Gibson static void spapr_machine_2_1_class_options(MachineClass *mc) 5256b0e966d0SJason Wang { 5257fc9f38c3SDavid Gibson spapr_machine_2_2_class_options(mc); 5258c4fc5695SMarc-André Lureau compat_props_add(mc->compat_props, hw_compat_2_1, hw_compat_2_1_len); 52596026db45SAlexey Kardashevskiy } 5260fccbc785SDavid Gibson DEFINE_SPAPR_MACHINE(2_1, "2.1", false); 52616026db45SAlexey Kardashevskiy 526229ee3247SAlexey Kardashevskiy static void spapr_machine_register_types(void) 526329ee3247SAlexey Kardashevskiy { 526429ee3247SAlexey Kardashevskiy type_register_static(&spapr_machine_info); 526529ee3247SAlexey Kardashevskiy } 526629ee3247SAlexey Kardashevskiy 526729ee3247SAlexey Kardashevskiy type_init(spapr_machine_register_types) 5268