153018216SPaolo Bonzini /* 253018216SPaolo Bonzini * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator 353018216SPaolo Bonzini * 453018216SPaolo Bonzini * Copyright (c) 2004-2007 Fabrice Bellard 553018216SPaolo Bonzini * Copyright (c) 2007 Jocelyn Mayer 653018216SPaolo Bonzini * Copyright (c) 2010 David Gibson, IBM Corporation. 753018216SPaolo Bonzini * 853018216SPaolo Bonzini * Permission is hereby granted, free of charge, to any person obtaining a copy 953018216SPaolo Bonzini * of this software and associated documentation files (the "Software"), to deal 1053018216SPaolo Bonzini * in the Software without restriction, including without limitation the rights 1153018216SPaolo Bonzini * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 1253018216SPaolo Bonzini * copies of the Software, and to permit persons to whom the Software is 1353018216SPaolo Bonzini * furnished to do so, subject to the following conditions: 1453018216SPaolo Bonzini * 1553018216SPaolo Bonzini * The above copyright notice and this permission notice shall be included in 1653018216SPaolo Bonzini * all copies or substantial portions of the Software. 1753018216SPaolo Bonzini * 1853018216SPaolo Bonzini * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 1953018216SPaolo Bonzini * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 2053018216SPaolo Bonzini * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 2153018216SPaolo Bonzini * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 2253018216SPaolo Bonzini * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 2353018216SPaolo Bonzini * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 2453018216SPaolo Bonzini * THE SOFTWARE. 2553018216SPaolo Bonzini */ 26a8d25326SMarkus Armbruster 270d75590dSPeter Maydell #include "qemu/osdep.h" 28a8d25326SMarkus Armbruster #include "qemu-common.h" 292c65db5eSPaolo Bonzini #include "qemu/datadir.h" 30da34e65cSMarkus Armbruster #include "qapi/error.h" 31fa98fbfcSSam Bobroff #include "qapi/visitor.h" 3253018216SPaolo Bonzini #include "sysemu/sysemu.h" 33b58c5c2dSMarkus Armbruster #include "sysemu/hostmem.h" 34e35704baSEduardo Habkost #include "sysemu/numa.h" 3523ff81bdSGreg Kurz #include "sysemu/qtest.h" 3671e8a915SMarkus Armbruster #include "sysemu/reset.h" 3754d31236SMarkus Armbruster #include "sysemu/runstate.h" 3803dd024fSPaolo Bonzini #include "qemu/log.h" 3971461b0fSAlexey Kardashevskiy #include "hw/fw-path-provider.h" 4053018216SPaolo Bonzini #include "elf.h" 4153018216SPaolo Bonzini #include "net/net.h" 42ad440b4aSAndrew Jones #include "sysemu/device_tree.h" 4353018216SPaolo Bonzini #include "sysemu/cpus.h" 44b3946626SVincent Palatin #include "sysemu/hw_accel.h" 4553018216SPaolo Bonzini #include "kvm_ppc.h" 46c4b63b7cSJuan Quintela #include "migration/misc.h" 47ca77ee28SMarkus Armbruster #include "migration/qemu-file-types.h" 4884a899deSJuan Quintela #include "migration/global_state.h" 49f2a8f0a6SJuan Quintela #include "migration/register.h" 502500fb42SAravinda Prasad #include "migration/blocker.h" 514be21d56SDavid Gibson #include "mmu-hash64.h" 52b4db5413SSuraj Jitindar Singh #include "mmu-book3s-v3.h" 537abd43baSSuraj Jitindar Singh #include "cpu-models.h" 542e5b09fdSMarkus Armbruster #include "hw/core/cpu.h" 5553018216SPaolo Bonzini 5653018216SPaolo Bonzini #include "hw/boards.h" 570d09e41aSPaolo Bonzini #include "hw/ppc/ppc.h" 5853018216SPaolo Bonzini #include "hw/loader.h" 5953018216SPaolo Bonzini 607804c353SCédric Le Goater #include "hw/ppc/fdt.h" 610d09e41aSPaolo Bonzini #include "hw/ppc/spapr.h" 620d09e41aSPaolo Bonzini #include "hw/ppc/spapr_vio.h" 63a27bd6c7SMarkus Armbruster #include "hw/qdev-properties.h" 640d09e41aSPaolo Bonzini #include "hw/pci-host/spapr.h" 6553018216SPaolo Bonzini #include "hw/pci/msi.h" 6653018216SPaolo Bonzini 6753018216SPaolo Bonzini #include "hw/pci/pci.h" 6871461b0fSAlexey Kardashevskiy #include "hw/scsi/scsi.h" 6971461b0fSAlexey Kardashevskiy #include "hw/virtio/virtio-scsi.h" 70c4e13492SFelipe Franciosi #include "hw/virtio/vhost-scsi-common.h" 7153018216SPaolo Bonzini 7253018216SPaolo Bonzini #include "exec/address-spaces.h" 732309832aSDavid Gibson #include "exec/ram_addr.h" 7453018216SPaolo Bonzini #include "hw/usb.h" 7553018216SPaolo Bonzini #include "qemu/config-file.h" 76135a129aSAneesh Kumar K.V #include "qemu/error-report.h" 772a6593cbSAlexey Kardashevskiy #include "trace.h" 7834316482SAlexey Kardashevskiy #include "hw/nmi.h" 796449da45SCédric Le Goater #include "hw/intc/intc.h" 8053018216SPaolo Bonzini 8194a94e4cSBharata B Rao #include "hw/ppc/spapr_cpu_core.h" 822cc0e2e8SDavid Hildenbrand #include "hw/mem/memory-device.h" 830fb6bd07SMichael Roth #include "hw/ppc/spapr_tpm_proxy.h" 84ee3a71e3SShivaprasad G Bhat #include "hw/ppc/spapr_nvdimm.h" 851eee9950SDaniel Henrique Barboza #include "hw/ppc/spapr_numa.h" 866c8ebe30SDavid Gibson #include "hw/ppc/pef.h" 8768a27b20SMichael S. Tsirkin 88f041d6afSGreg Kurz #include "monitor/monitor.h" 89f041d6afSGreg Kurz 9053018216SPaolo Bonzini #include <libfdt.h> 9153018216SPaolo Bonzini 9253018216SPaolo Bonzini /* SLOF memory layout: 9353018216SPaolo Bonzini * 9453018216SPaolo Bonzini * SLOF raw image loaded at 0, copies its romfs right below the flat 9553018216SPaolo Bonzini * device-tree, then position SLOF itself 31M below that 9653018216SPaolo Bonzini * 9753018216SPaolo Bonzini * So we set FW_OVERHEAD to 40MB which should account for all of that 9853018216SPaolo Bonzini * and more 9953018216SPaolo Bonzini * 10053018216SPaolo Bonzini * We load our kernel at 4M, leaving space for SLOF initial image 10153018216SPaolo Bonzini */ 102b7d1f77aSBenjamin Herrenschmidt #define RTAS_MAX_ADDR 0x80000000 /* RTAS must stay below that */ 10353018216SPaolo Bonzini #define FW_MAX_SIZE 0x400000 10453018216SPaolo Bonzini #define FW_FILE_NAME "slof.bin" 10553018216SPaolo Bonzini #define FW_OVERHEAD 0x2800000 10653018216SPaolo Bonzini #define KERNEL_LOAD_ADDR FW_MAX_SIZE 10753018216SPaolo Bonzini 1089943266eSDavid Gibson #define MIN_RMA_SLOF (128 * MiB) 10953018216SPaolo Bonzini 1105c7adcf4SGreg Kurz #define PHANDLE_INTC 0x00001111 11153018216SPaolo Bonzini 1125d0fb150SGreg Kurz /* These two functions implement the VCPU id numbering: one to compute them 1135d0fb150SGreg Kurz * all and one to identify thread 0 of a VCORE. Any change to the first one 1145d0fb150SGreg Kurz * is likely to have an impact on the second one, so let's keep them close. 1155d0fb150SGreg Kurz */ 116ce2918cbSDavid Gibson static int spapr_vcpu_id(SpaprMachineState *spapr, int cpu_index) 1175d0fb150SGreg Kurz { 118fe6b6346SLike Xu MachineState *ms = MACHINE(spapr); 119fe6b6346SLike Xu unsigned int smp_threads = ms->smp.threads; 120fe6b6346SLike Xu 1211a5008fcSGreg Kurz assert(spapr->vsmt); 1225d0fb150SGreg Kurz return 1235d0fb150SGreg Kurz (cpu_index / smp_threads) * spapr->vsmt + cpu_index % smp_threads; 1245d0fb150SGreg Kurz } 125ce2918cbSDavid Gibson static bool spapr_is_thread0_in_vcore(SpaprMachineState *spapr, 1265d0fb150SGreg Kurz PowerPCCPU *cpu) 1275d0fb150SGreg Kurz { 1281a5008fcSGreg Kurz assert(spapr->vsmt); 1295d0fb150SGreg Kurz return spapr_get_vcpu_id(cpu) % spapr->vsmt == 0; 1305d0fb150SGreg Kurz } 1315d0fb150SGreg Kurz 13246f7afa3SGreg Kurz static bool pre_2_10_vmstate_dummy_icp_needed(void *opaque) 13346f7afa3SGreg Kurz { 13446f7afa3SGreg Kurz /* Dummy entries correspond to unused ICPState objects in older QEMUs, 13546f7afa3SGreg Kurz * and newer QEMUs don't even have them. In both cases, we don't want 13646f7afa3SGreg Kurz * to send anything on the wire. 13746f7afa3SGreg Kurz */ 13846f7afa3SGreg Kurz return false; 13946f7afa3SGreg Kurz } 14046f7afa3SGreg Kurz 14146f7afa3SGreg Kurz static const VMStateDescription pre_2_10_vmstate_dummy_icp = { 14246f7afa3SGreg Kurz .name = "icp/server", 14346f7afa3SGreg Kurz .version_id = 1, 14446f7afa3SGreg Kurz .minimum_version_id = 1, 14546f7afa3SGreg Kurz .needed = pre_2_10_vmstate_dummy_icp_needed, 14646f7afa3SGreg Kurz .fields = (VMStateField[]) { 14746f7afa3SGreg Kurz VMSTATE_UNUSED(4), /* uint32_t xirr */ 14846f7afa3SGreg Kurz VMSTATE_UNUSED(1), /* uint8_t pending_priority */ 14946f7afa3SGreg Kurz VMSTATE_UNUSED(1), /* uint8_t mfrr */ 15046f7afa3SGreg Kurz VMSTATE_END_OF_LIST() 15146f7afa3SGreg Kurz }, 15246f7afa3SGreg Kurz }; 15346f7afa3SGreg Kurz 15446f7afa3SGreg Kurz static void pre_2_10_vmstate_register_dummy_icp(int i) 15546f7afa3SGreg Kurz { 15646f7afa3SGreg Kurz vmstate_register(NULL, i, &pre_2_10_vmstate_dummy_icp, 15746f7afa3SGreg Kurz (void *)(uintptr_t) i); 15846f7afa3SGreg Kurz } 15946f7afa3SGreg Kurz 16046f7afa3SGreg Kurz static void pre_2_10_vmstate_unregister_dummy_icp(int i) 16146f7afa3SGreg Kurz { 16246f7afa3SGreg Kurz vmstate_unregister(NULL, &pre_2_10_vmstate_dummy_icp, 16346f7afa3SGreg Kurz (void *)(uintptr_t) i); 16446f7afa3SGreg Kurz } 16546f7afa3SGreg Kurz 166ce2918cbSDavid Gibson int spapr_max_server_number(SpaprMachineState *spapr) 16746f7afa3SGreg Kurz { 168fe6b6346SLike Xu MachineState *ms = MACHINE(spapr); 169fe6b6346SLike Xu 1701a5008fcSGreg Kurz assert(spapr->vsmt); 171fe6b6346SLike Xu return DIV_ROUND_UP(ms->smp.max_cpus * spapr->vsmt, ms->smp.threads); 17246f7afa3SGreg Kurz } 17346f7afa3SGreg Kurz 174833d4668SAlexey Kardashevskiy static int spapr_fixup_cpu_smt_dt(void *fdt, int offset, PowerPCCPU *cpu, 175833d4668SAlexey Kardashevskiy int smt_threads) 176833d4668SAlexey Kardashevskiy { 177833d4668SAlexey Kardashevskiy int i, ret = 0; 178833d4668SAlexey Kardashevskiy uint32_t servers_prop[smt_threads]; 179833d4668SAlexey Kardashevskiy uint32_t gservers_prop[smt_threads * 2]; 18014bb4486SGreg Kurz int index = spapr_get_vcpu_id(cpu); 181833d4668SAlexey Kardashevskiy 182d6e166c0SDavid Gibson if (cpu->compat_pvr) { 183d6e166c0SDavid Gibson ret = fdt_setprop_cell(fdt, offset, "cpu-version", cpu->compat_pvr); 1846d9412eaSAlexey Kardashevskiy if (ret < 0) { 1856d9412eaSAlexey Kardashevskiy return ret; 1866d9412eaSAlexey Kardashevskiy } 1876d9412eaSAlexey Kardashevskiy } 1886d9412eaSAlexey Kardashevskiy 189833d4668SAlexey Kardashevskiy /* Build interrupt servers and gservers properties */ 190833d4668SAlexey Kardashevskiy for (i = 0; i < smt_threads; i++) { 191833d4668SAlexey Kardashevskiy servers_prop[i] = cpu_to_be32(index + i); 192833d4668SAlexey Kardashevskiy /* Hack, direct the group queues back to cpu 0 */ 193833d4668SAlexey Kardashevskiy gservers_prop[i*2] = cpu_to_be32(index + i); 194833d4668SAlexey Kardashevskiy gservers_prop[i*2 + 1] = 0; 195833d4668SAlexey Kardashevskiy } 196833d4668SAlexey Kardashevskiy ret = fdt_setprop(fdt, offset, "ibm,ppc-interrupt-server#s", 197833d4668SAlexey Kardashevskiy servers_prop, sizeof(servers_prop)); 198833d4668SAlexey Kardashevskiy if (ret < 0) { 199833d4668SAlexey Kardashevskiy return ret; 200833d4668SAlexey Kardashevskiy } 201833d4668SAlexey Kardashevskiy ret = fdt_setprop(fdt, offset, "ibm,ppc-interrupt-gserver#s", 202833d4668SAlexey Kardashevskiy gservers_prop, sizeof(gservers_prop)); 203833d4668SAlexey Kardashevskiy 204833d4668SAlexey Kardashevskiy return ret; 205833d4668SAlexey Kardashevskiy } 206833d4668SAlexey Kardashevskiy 20791335a5eSDavid Gibson static void spapr_dt_pa_features(SpaprMachineState *spapr, 208ee76a09fSDavid Gibson PowerPCCPU *cpu, 209daa36379SDavid Gibson void *fdt, int offset) 21086d5771aSSam Bobroff { 21186d5771aSSam Bobroff uint8_t pa_features_206[] = { 6, 0, 21286d5771aSSam Bobroff 0xf6, 0x1f, 0xc7, 0x00, 0x80, 0xc0 }; 21386d5771aSSam Bobroff uint8_t pa_features_207[] = { 24, 0, 21486d5771aSSam Bobroff 0xf6, 0x1f, 0xc7, 0xc0, 0x80, 0xf0, 21586d5771aSSam Bobroff 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 21686d5771aSSam Bobroff 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 21786d5771aSSam Bobroff 0x80, 0x00, 0x80, 0x00, 0x00, 0x00 }; 2189fb4541fSSam Bobroff uint8_t pa_features_300[] = { 66, 0, 2199fb4541fSSam Bobroff /* 0: MMU|FPU|SLB|RUN|DABR|NX, 1: fri[nzpm]|DABRX|SPRG3|SLB0|PP110 */ 2209fb4541fSSam Bobroff /* 2: VPM|DS205|PPR|DS202|DS206, 3: LSD|URG, SSO, 5: LE|CFAR|EB|LSQ */ 22186d5771aSSam Bobroff 0xf6, 0x1f, 0xc7, 0xc0, 0x80, 0xf0, /* 0 - 5 */ 2229fb4541fSSam Bobroff /* 6: DS207 */ 22386d5771aSSam Bobroff 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, /* 6 - 11 */ 2249fb4541fSSam Bobroff /* 16: Vector */ 22586d5771aSSam Bobroff 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, /* 12 - 17 */ 2269fb4541fSSam Bobroff /* 18: Vec. Scalar, 20: Vec. XOR, 22: HTM */ 2279bf502feSDavid Gibson 0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 18 - 23 */ 2289fb4541fSSam Bobroff /* 24: Ext. Dec, 26: 64 bit ftrs, 28: PM ftrs */ 2299fb4541fSSam Bobroff 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 24 - 29 */ 2309fb4541fSSam Bobroff /* 30: MMR, 32: LE atomic, 34: EBB + ext EBB */ 2319fb4541fSSam Bobroff 0x80, 0x00, 0x80, 0x00, 0xC0, 0x00, /* 30 - 35 */ 2329fb4541fSSam Bobroff /* 36: SPR SO, 38: Copy/Paste, 40: Radix MMU */ 2339fb4541fSSam Bobroff 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 36 - 41 */ 2349fb4541fSSam Bobroff /* 42: PM, 44: PC RA, 46: SC vec'd */ 2359fb4541fSSam Bobroff 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 42 - 47 */ 2369fb4541fSSam Bobroff /* 48: SIMD, 50: QP BFP, 52: String */ 2379fb4541fSSam Bobroff 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 48 - 53 */ 2389fb4541fSSam Bobroff /* 54: DecFP, 56: DecI, 58: SHA */ 2399fb4541fSSam Bobroff 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 54 - 59 */ 2409fb4541fSSam Bobroff /* 60: NM atomic, 62: RNG */ 2419fb4541fSSam Bobroff 0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 60 - 65 */ 2429fb4541fSSam Bobroff }; 2437abd43baSSuraj Jitindar Singh uint8_t *pa_features = NULL; 24486d5771aSSam Bobroff size_t pa_size; 24586d5771aSSam Bobroff 2467abd43baSSuraj Jitindar Singh if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_2_06, 0, cpu->compat_pvr)) { 24786d5771aSSam Bobroff pa_features = pa_features_206; 24886d5771aSSam Bobroff pa_size = sizeof(pa_features_206); 2497abd43baSSuraj Jitindar Singh } 2507abd43baSSuraj Jitindar Singh if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_2_07, 0, cpu->compat_pvr)) { 25186d5771aSSam Bobroff pa_features = pa_features_207; 25286d5771aSSam Bobroff pa_size = sizeof(pa_features_207); 2537abd43baSSuraj Jitindar Singh } 2547abd43baSSuraj Jitindar Singh if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_00, 0, cpu->compat_pvr)) { 25586d5771aSSam Bobroff pa_features = pa_features_300; 25686d5771aSSam Bobroff pa_size = sizeof(pa_features_300); 2577abd43baSSuraj Jitindar Singh } 2587abd43baSSuraj Jitindar Singh if (!pa_features) { 25986d5771aSSam Bobroff return; 26086d5771aSSam Bobroff } 26186d5771aSSam Bobroff 26226cd35b8SDavid Gibson if (ppc_hash64_has(cpu, PPC_HASH64_CI_LARGEPAGE)) { 26386d5771aSSam Bobroff /* 26486d5771aSSam Bobroff * Note: we keep CI large pages off by default because a 64K capable 26586d5771aSSam Bobroff * guest provisioned with large pages might otherwise try to map a qemu 26686d5771aSSam Bobroff * framebuffer (or other kind of memory mapped PCI BAR) using 64K pages 26786d5771aSSam Bobroff * even if that qemu runs on a 4k host. 26886d5771aSSam Bobroff * We dd this bit back here if we are confident this is not an issue 26986d5771aSSam Bobroff */ 27086d5771aSSam Bobroff pa_features[3] |= 0x20; 27186d5771aSSam Bobroff } 2724e5fe368SSuraj Jitindar Singh if ((spapr_get_cap(spapr, SPAPR_CAP_HTM) != 0) && pa_size > 24) { 27386d5771aSSam Bobroff pa_features[24] |= 0x80; /* Transactional memory support */ 27486d5771aSSam Bobroff } 275daa36379SDavid Gibson if (spapr->cas_pre_isa3_guest && pa_size > 40) { 276e957f6a9SSam Bobroff /* Workaround for broken kernels that attempt (guest) radix 277e957f6a9SSam Bobroff * mode when they can't handle it, if they see the radix bit set 278e957f6a9SSam Bobroff * in pa-features. So hide it from them. */ 279e957f6a9SSam Bobroff pa_features[40 + 2] &= ~0x80; /* Radix MMU */ 280e957f6a9SSam Bobroff } 28186d5771aSSam Bobroff 28286d5771aSSam Bobroff _FDT((fdt_setprop(fdt, offset, "ibm,pa-features", pa_features, pa_size))); 28386d5771aSSam Bobroff } 28486d5771aSSam Bobroff 285c86c1affSDaniel Henrique Barboza static hwaddr spapr_node0_size(MachineState *machine) 286b082d65aSAlexey Kardashevskiy { 287aa570207STao Xu if (machine->numa_state->num_nodes) { 288b082d65aSAlexey Kardashevskiy int i; 289aa570207STao Xu for (i = 0; i < machine->numa_state->num_nodes; ++i) { 2907e721e7bSTao Xu if (machine->numa_state->nodes[i].node_mem) { 2917e721e7bSTao Xu return MIN(pow2floor(machine->numa_state->nodes[i].node_mem), 292fb164994SDavid Gibson machine->ram_size); 293b082d65aSAlexey Kardashevskiy } 294b082d65aSAlexey Kardashevskiy } 295b082d65aSAlexey Kardashevskiy } 296fb164994SDavid Gibson return machine->ram_size; 297b082d65aSAlexey Kardashevskiy } 298b082d65aSAlexey Kardashevskiy 299a1d59c0fSAlexey Kardashevskiy static void add_str(GString *s, const gchar *s1) 300a1d59c0fSAlexey Kardashevskiy { 301a1d59c0fSAlexey Kardashevskiy g_string_append_len(s, s1, strlen(s1) + 1); 302a1d59c0fSAlexey Kardashevskiy } 30353018216SPaolo Bonzini 304f1aa45ffSDaniel Henrique Barboza static int spapr_dt_memory_node(SpaprMachineState *spapr, void *fdt, int nodeid, 305f1aa45ffSDaniel Henrique Barboza hwaddr start, hwaddr size) 30626a8c353SAlexey Kardashevskiy { 30726a8c353SAlexey Kardashevskiy char mem_name[32]; 30826a8c353SAlexey Kardashevskiy uint64_t mem_reg_property[2]; 30926a8c353SAlexey Kardashevskiy int off; 31026a8c353SAlexey Kardashevskiy 31126a8c353SAlexey Kardashevskiy mem_reg_property[0] = cpu_to_be64(start); 31226a8c353SAlexey Kardashevskiy mem_reg_property[1] = cpu_to_be64(size); 31326a8c353SAlexey Kardashevskiy 3143a17e38fSAlexey Kardashevskiy sprintf(mem_name, "memory@%" HWADDR_PRIx, start); 31526a8c353SAlexey Kardashevskiy off = fdt_add_subnode(fdt, 0, mem_name); 31626a8c353SAlexey Kardashevskiy _FDT(off); 31726a8c353SAlexey Kardashevskiy _FDT((fdt_setprop_string(fdt, off, "device_type", "memory"))); 31826a8c353SAlexey Kardashevskiy _FDT((fdt_setprop(fdt, off, "reg", mem_reg_property, 31926a8c353SAlexey Kardashevskiy sizeof(mem_reg_property)))); 320f1aa45ffSDaniel Henrique Barboza spapr_numa_write_associativity_dt(spapr, fdt, off, nodeid); 32103d196b7SBharata B Rao return off; 32226a8c353SAlexey Kardashevskiy } 32326a8c353SAlexey Kardashevskiy 324f47bd1c8SIgor Mammedov static uint32_t spapr_pc_dimm_node(MemoryDeviceInfoList *list, ram_addr_t addr) 325f47bd1c8SIgor Mammedov { 326f47bd1c8SIgor Mammedov MemoryDeviceInfoList *info; 327f47bd1c8SIgor Mammedov 328f47bd1c8SIgor Mammedov for (info = list; info; info = info->next) { 329f47bd1c8SIgor Mammedov MemoryDeviceInfo *value = info->value; 330f47bd1c8SIgor Mammedov 331f47bd1c8SIgor Mammedov if (value && value->type == MEMORY_DEVICE_INFO_KIND_DIMM) { 332f47bd1c8SIgor Mammedov PCDIMMDeviceInfo *pcdimm_info = value->u.dimm.data; 333f47bd1c8SIgor Mammedov 334ccc2cef8SDavid Gibson if (addr >= pcdimm_info->addr && 335f47bd1c8SIgor Mammedov addr < (pcdimm_info->addr + pcdimm_info->size)) { 336f47bd1c8SIgor Mammedov return pcdimm_info->node; 337f47bd1c8SIgor Mammedov } 338f47bd1c8SIgor Mammedov } 339f47bd1c8SIgor Mammedov } 340f47bd1c8SIgor Mammedov 341f47bd1c8SIgor Mammedov return -1; 342f47bd1c8SIgor Mammedov } 343f47bd1c8SIgor Mammedov 344a324d6f1SBharata B Rao struct sPAPRDrconfCellV2 { 345a324d6f1SBharata B Rao uint32_t seq_lmbs; 346a324d6f1SBharata B Rao uint64_t base_addr; 347a324d6f1SBharata B Rao uint32_t drc_index; 348a324d6f1SBharata B Rao uint32_t aa_index; 349a324d6f1SBharata B Rao uint32_t flags; 350a324d6f1SBharata B Rao } QEMU_PACKED; 351a324d6f1SBharata B Rao 352a324d6f1SBharata B Rao typedef struct DrconfCellQueue { 353a324d6f1SBharata B Rao struct sPAPRDrconfCellV2 cell; 354a324d6f1SBharata B Rao QSIMPLEQ_ENTRY(DrconfCellQueue) entry; 355a324d6f1SBharata B Rao } DrconfCellQueue; 356a324d6f1SBharata B Rao 357a324d6f1SBharata B Rao static DrconfCellQueue * 358a324d6f1SBharata B Rao spapr_get_drconf_cell(uint32_t seq_lmbs, uint64_t base_addr, 359a324d6f1SBharata B Rao uint32_t drc_index, uint32_t aa_index, 360a324d6f1SBharata B Rao uint32_t flags) 36103d196b7SBharata B Rao { 362a324d6f1SBharata B Rao DrconfCellQueue *elem; 363a324d6f1SBharata B Rao 364a324d6f1SBharata B Rao elem = g_malloc0(sizeof(*elem)); 365a324d6f1SBharata B Rao elem->cell.seq_lmbs = cpu_to_be32(seq_lmbs); 366a324d6f1SBharata B Rao elem->cell.base_addr = cpu_to_be64(base_addr); 367a324d6f1SBharata B Rao elem->cell.drc_index = cpu_to_be32(drc_index); 368a324d6f1SBharata B Rao elem->cell.aa_index = cpu_to_be32(aa_index); 369a324d6f1SBharata B Rao elem->cell.flags = cpu_to_be32(flags); 370a324d6f1SBharata B Rao 371a324d6f1SBharata B Rao return elem; 372a324d6f1SBharata B Rao } 373a324d6f1SBharata B Rao 37491335a5eSDavid Gibson static int spapr_dt_dynamic_memory_v2(SpaprMachineState *spapr, void *fdt, 375a324d6f1SBharata B Rao int offset, MemoryDeviceInfoList *dimms) 3762a6593cbSAlexey Kardashevskiy { 3772a6593cbSAlexey Kardashevskiy MachineState *machine = MACHINE(spapr); 378cc941111SFabiano Rosas uint8_t *int_buf, *cur_index; 379a324d6f1SBharata B Rao int ret; 38003d196b7SBharata B Rao uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE; 381a324d6f1SBharata B Rao uint64_t addr, cur_addr, size; 382b0c14ec4SDavid Hildenbrand uint32_t nr_boot_lmbs = (machine->device_memory->base / lmb_size); 383b0c14ec4SDavid Hildenbrand uint64_t mem_end = machine->device_memory->base + 384b0c14ec4SDavid Hildenbrand memory_region_size(&machine->device_memory->mr); 385cc941111SFabiano Rosas uint32_t node, buf_len, nr_entries = 0; 386ce2918cbSDavid Gibson SpaprDrc *drc; 387a324d6f1SBharata B Rao DrconfCellQueue *elem, *next; 388a324d6f1SBharata B Rao MemoryDeviceInfoList *info; 389a324d6f1SBharata B Rao QSIMPLEQ_HEAD(, DrconfCellQueue) drconf_queue 390a324d6f1SBharata B Rao = QSIMPLEQ_HEAD_INITIALIZER(drconf_queue); 391a324d6f1SBharata B Rao 392a324d6f1SBharata B Rao /* Entry to cover RAM and the gap area */ 393a324d6f1SBharata B Rao elem = spapr_get_drconf_cell(nr_boot_lmbs, 0, 0, -1, 394a324d6f1SBharata B Rao SPAPR_LMB_FLAGS_RESERVED | 395a324d6f1SBharata B Rao SPAPR_LMB_FLAGS_DRC_INVALID); 396a324d6f1SBharata B Rao QSIMPLEQ_INSERT_TAIL(&drconf_queue, elem, entry); 397a324d6f1SBharata B Rao nr_entries++; 398a324d6f1SBharata B Rao 399b0c14ec4SDavid Hildenbrand cur_addr = machine->device_memory->base; 400a324d6f1SBharata B Rao for (info = dimms; info; info = info->next) { 401a324d6f1SBharata B Rao PCDIMMDeviceInfo *di = info->value->u.dimm.data; 402a324d6f1SBharata B Rao 403a324d6f1SBharata B Rao addr = di->addr; 404a324d6f1SBharata B Rao size = di->size; 405a324d6f1SBharata B Rao node = di->node; 406a324d6f1SBharata B Rao 407ee3a71e3SShivaprasad G Bhat /* 408ee3a71e3SShivaprasad G Bhat * The NVDIMM area is hotpluggable after the NVDIMM is unplugged. The 409ee3a71e3SShivaprasad G Bhat * area is marked hotpluggable in the next iteration for the bigger 410ee3a71e3SShivaprasad G Bhat * chunk including the NVDIMM occupied area. 411ee3a71e3SShivaprasad G Bhat */ 412ee3a71e3SShivaprasad G Bhat if (info->value->type == MEMORY_DEVICE_INFO_KIND_NVDIMM) 413ee3a71e3SShivaprasad G Bhat continue; 414ee3a71e3SShivaprasad G Bhat 415a324d6f1SBharata B Rao /* Entry for hot-pluggable area */ 416a324d6f1SBharata B Rao if (cur_addr < addr) { 417a324d6f1SBharata B Rao drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, cur_addr / lmb_size); 418a324d6f1SBharata B Rao g_assert(drc); 419a324d6f1SBharata B Rao elem = spapr_get_drconf_cell((addr - cur_addr) / lmb_size, 420a324d6f1SBharata B Rao cur_addr, spapr_drc_index(drc), -1, 0); 421a324d6f1SBharata B Rao QSIMPLEQ_INSERT_TAIL(&drconf_queue, elem, entry); 422a324d6f1SBharata B Rao nr_entries++; 423a324d6f1SBharata B Rao } 424a324d6f1SBharata B Rao 425a324d6f1SBharata B Rao /* Entry for DIMM */ 426a324d6f1SBharata B Rao drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, addr / lmb_size); 427a324d6f1SBharata B Rao g_assert(drc); 428a324d6f1SBharata B Rao elem = spapr_get_drconf_cell(size / lmb_size, addr, 429a324d6f1SBharata B Rao spapr_drc_index(drc), node, 4300911a60cSLeonardo Bras (SPAPR_LMB_FLAGS_ASSIGNED | 4310911a60cSLeonardo Bras SPAPR_LMB_FLAGS_HOTREMOVABLE)); 432a324d6f1SBharata B Rao QSIMPLEQ_INSERT_TAIL(&drconf_queue, elem, entry); 433a324d6f1SBharata B Rao nr_entries++; 434a324d6f1SBharata B Rao cur_addr = addr + size; 435a324d6f1SBharata B Rao } 436a324d6f1SBharata B Rao 437a324d6f1SBharata B Rao /* Entry for remaining hotpluggable area */ 438a324d6f1SBharata B Rao if (cur_addr < mem_end) { 439a324d6f1SBharata B Rao drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, cur_addr / lmb_size); 440a324d6f1SBharata B Rao g_assert(drc); 441a324d6f1SBharata B Rao elem = spapr_get_drconf_cell((mem_end - cur_addr) / lmb_size, 442a324d6f1SBharata B Rao cur_addr, spapr_drc_index(drc), -1, 0); 443a324d6f1SBharata B Rao QSIMPLEQ_INSERT_TAIL(&drconf_queue, elem, entry); 444a324d6f1SBharata B Rao nr_entries++; 445a324d6f1SBharata B Rao } 446a324d6f1SBharata B Rao 447a324d6f1SBharata B Rao buf_len = nr_entries * sizeof(struct sPAPRDrconfCellV2) + sizeof(uint32_t); 448a324d6f1SBharata B Rao int_buf = cur_index = g_malloc0(buf_len); 449a324d6f1SBharata B Rao *(uint32_t *)int_buf = cpu_to_be32(nr_entries); 450a324d6f1SBharata B Rao cur_index += sizeof(nr_entries); 451a324d6f1SBharata B Rao 452a324d6f1SBharata B Rao QSIMPLEQ_FOREACH_SAFE(elem, &drconf_queue, entry, next) { 453a324d6f1SBharata B Rao memcpy(cur_index, &elem->cell, sizeof(elem->cell)); 454a324d6f1SBharata B Rao cur_index += sizeof(elem->cell); 455a324d6f1SBharata B Rao QSIMPLEQ_REMOVE(&drconf_queue, elem, DrconfCellQueue, entry); 456a324d6f1SBharata B Rao g_free(elem); 457a324d6f1SBharata B Rao } 458a324d6f1SBharata B Rao 459a324d6f1SBharata B Rao ret = fdt_setprop(fdt, offset, "ibm,dynamic-memory-v2", int_buf, buf_len); 460a324d6f1SBharata B Rao g_free(int_buf); 461a324d6f1SBharata B Rao if (ret < 0) { 462a324d6f1SBharata B Rao return -1; 463a324d6f1SBharata B Rao } 464a324d6f1SBharata B Rao return 0; 465a324d6f1SBharata B Rao } 466a324d6f1SBharata B Rao 46791335a5eSDavid Gibson static int spapr_dt_dynamic_memory(SpaprMachineState *spapr, void *fdt, 468a324d6f1SBharata B Rao int offset, MemoryDeviceInfoList *dimms) 469a324d6f1SBharata B Rao { 470b0c14ec4SDavid Hildenbrand MachineState *machine = MACHINE(spapr); 471a324d6f1SBharata B Rao int i, ret; 472a324d6f1SBharata B Rao uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE; 4730c9269a5SDavid Hildenbrand uint32_t device_lmb_start = machine->device_memory->base / lmb_size; 474b0c14ec4SDavid Hildenbrand uint32_t nr_lmbs = (machine->device_memory->base + 475b0c14ec4SDavid Hildenbrand memory_region_size(&machine->device_memory->mr)) / 476d0e5a8f2SBharata B Rao lmb_size; 47703d196b7SBharata B Rao uint32_t *int_buf, *cur_index, buf_len; 47816c25aefSBharata B Rao 47916c25aefSBharata B Rao /* 480ef001f06SThomas Huth * Allocate enough buffer size to fit in ibm,dynamic-memory 481ef001f06SThomas Huth */ 482a324d6f1SBharata B Rao buf_len = (nr_lmbs * SPAPR_DR_LMB_LIST_ENTRY_SIZE + 1) * sizeof(uint32_t); 48303d196b7SBharata B Rao cur_index = int_buf = g_malloc0(buf_len); 48403d196b7SBharata B Rao int_buf[0] = cpu_to_be32(nr_lmbs); 48503d196b7SBharata B Rao cur_index++; 48603d196b7SBharata B Rao for (i = 0; i < nr_lmbs; i++) { 487d0e5a8f2SBharata B Rao uint64_t addr = i * lmb_size; 48803d196b7SBharata B Rao uint32_t *dynamic_memory = cur_index; 48903d196b7SBharata B Rao 4900c9269a5SDavid Hildenbrand if (i >= device_lmb_start) { 491ce2918cbSDavid Gibson SpaprDrc *drc; 492d0e5a8f2SBharata B Rao 493fbf55397SDavid Gibson drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, i); 49403d196b7SBharata B Rao g_assert(drc); 49503d196b7SBharata B Rao 49603d196b7SBharata B Rao dynamic_memory[0] = cpu_to_be32(addr >> 32); 49703d196b7SBharata B Rao dynamic_memory[1] = cpu_to_be32(addr & 0xffffffff); 4980b55aa91SDavid Gibson dynamic_memory[2] = cpu_to_be32(spapr_drc_index(drc)); 49903d196b7SBharata B Rao dynamic_memory[3] = cpu_to_be32(0); /* reserved */ 500f47bd1c8SIgor Mammedov dynamic_memory[4] = cpu_to_be32(spapr_pc_dimm_node(dimms, addr)); 501d0e5a8f2SBharata B Rao if (memory_region_present(get_system_memory(), addr)) { 50203d196b7SBharata B Rao dynamic_memory[5] = cpu_to_be32(SPAPR_LMB_FLAGS_ASSIGNED); 50303d196b7SBharata B Rao } else { 50403d196b7SBharata B Rao dynamic_memory[5] = cpu_to_be32(0); 50503d196b7SBharata B Rao } 506d0e5a8f2SBharata B Rao } else { 507d0e5a8f2SBharata B Rao /* 508d0e5a8f2SBharata B Rao * LMB information for RMA, boot time RAM and gap b/n RAM and 5090c9269a5SDavid Hildenbrand * device memory region -- all these are marked as reserved 510d0e5a8f2SBharata B Rao * and as having no valid DRC. 511d0e5a8f2SBharata B Rao */ 512d0e5a8f2SBharata B Rao dynamic_memory[0] = cpu_to_be32(addr >> 32); 513d0e5a8f2SBharata B Rao dynamic_memory[1] = cpu_to_be32(addr & 0xffffffff); 514d0e5a8f2SBharata B Rao dynamic_memory[2] = cpu_to_be32(0); 515d0e5a8f2SBharata B Rao dynamic_memory[3] = cpu_to_be32(0); /* reserved */ 516d0e5a8f2SBharata B Rao dynamic_memory[4] = cpu_to_be32(-1); 517d0e5a8f2SBharata B Rao dynamic_memory[5] = cpu_to_be32(SPAPR_LMB_FLAGS_RESERVED | 518d0e5a8f2SBharata B Rao SPAPR_LMB_FLAGS_DRC_INVALID); 519d0e5a8f2SBharata B Rao } 52003d196b7SBharata B Rao 52103d196b7SBharata B Rao cur_index += SPAPR_DR_LMB_LIST_ENTRY_SIZE; 52203d196b7SBharata B Rao } 52303d196b7SBharata B Rao ret = fdt_setprop(fdt, offset, "ibm,dynamic-memory", int_buf, buf_len); 524a324d6f1SBharata B Rao g_free(int_buf); 52503d196b7SBharata B Rao if (ret < 0) { 526a324d6f1SBharata B Rao return -1; 527a324d6f1SBharata B Rao } 528a324d6f1SBharata B Rao return 0; 529a324d6f1SBharata B Rao } 530a324d6f1SBharata B Rao 531a324d6f1SBharata B Rao /* 532a324d6f1SBharata B Rao * Adds ibm,dynamic-reconfiguration-memory node. 533a324d6f1SBharata B Rao * Refer to docs/specs/ppc-spapr-hotplug.txt for the documentation 534a324d6f1SBharata B Rao * of this device tree node. 535a324d6f1SBharata B Rao */ 53691335a5eSDavid Gibson static int spapr_dt_dynamic_reconfiguration_memory(SpaprMachineState *spapr, 53791335a5eSDavid Gibson void *fdt) 538a324d6f1SBharata B Rao { 539a324d6f1SBharata B Rao MachineState *machine = MACHINE(spapr); 5400ee52012SDaniel Henrique Barboza int ret, offset; 541a324d6f1SBharata B Rao uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE; 5427abf9797SAnton Blanchard uint32_t prop_lmb_size[] = {cpu_to_be32(lmb_size >> 32), 5437abf9797SAnton Blanchard cpu_to_be32(lmb_size & 0xffffffff)}; 544a324d6f1SBharata B Rao MemoryDeviceInfoList *dimms = NULL; 545a324d6f1SBharata B Rao 546a324d6f1SBharata B Rao /* 5470c9269a5SDavid Hildenbrand * Don't create the node if there is no device memory 548a324d6f1SBharata B Rao */ 549a324d6f1SBharata B Rao if (machine->ram_size == machine->maxram_size) { 550a324d6f1SBharata B Rao return 0; 551a324d6f1SBharata B Rao } 552a324d6f1SBharata B Rao 553a324d6f1SBharata B Rao offset = fdt_add_subnode(fdt, 0, "ibm,dynamic-reconfiguration-memory"); 554a324d6f1SBharata B Rao 555a324d6f1SBharata B Rao ret = fdt_setprop(fdt, offset, "ibm,lmb-size", prop_lmb_size, 556a324d6f1SBharata B Rao sizeof(prop_lmb_size)); 557a324d6f1SBharata B Rao if (ret < 0) { 558a324d6f1SBharata B Rao return ret; 559a324d6f1SBharata B Rao } 560a324d6f1SBharata B Rao 561a324d6f1SBharata B Rao ret = fdt_setprop_cell(fdt, offset, "ibm,memory-flags-mask", 0xff); 562a324d6f1SBharata B Rao if (ret < 0) { 563a324d6f1SBharata B Rao return ret; 564a324d6f1SBharata B Rao } 565a324d6f1SBharata B Rao 566a324d6f1SBharata B Rao ret = fdt_setprop_cell(fdt, offset, "ibm,memory-preservation-time", 0x0); 567a324d6f1SBharata B Rao if (ret < 0) { 568a324d6f1SBharata B Rao return ret; 569a324d6f1SBharata B Rao } 570a324d6f1SBharata B Rao 571a324d6f1SBharata B Rao /* ibm,dynamic-memory or ibm,dynamic-memory-v2 */ 5722cc0e2e8SDavid Hildenbrand dimms = qmp_memory_device_list(); 573a324d6f1SBharata B Rao if (spapr_ovec_test(spapr->ov5_cas, OV5_DRMEM_V2)) { 57491335a5eSDavid Gibson ret = spapr_dt_dynamic_memory_v2(spapr, fdt, offset, dimms); 575a324d6f1SBharata B Rao } else { 57691335a5eSDavid Gibson ret = spapr_dt_dynamic_memory(spapr, fdt, offset, dimms); 577a324d6f1SBharata B Rao } 578a324d6f1SBharata B Rao qapi_free_MemoryDeviceInfoList(dimms); 579a324d6f1SBharata B Rao 580a324d6f1SBharata B Rao if (ret < 0) { 581a324d6f1SBharata B Rao return ret; 58203d196b7SBharata B Rao } 58303d196b7SBharata B Rao 5840ee52012SDaniel Henrique Barboza ret = spapr_numa_write_assoc_lookup_arrays(spapr, fdt, offset); 585a324d6f1SBharata B Rao 58603d196b7SBharata B Rao return ret; 58703d196b7SBharata B Rao } 58803d196b7SBharata B Rao 58991335a5eSDavid Gibson static int spapr_dt_memory(SpaprMachineState *spapr, void *fdt) 5906787d27bSMichael Roth { 591fa523f0dSDavid Gibson MachineState *machine = MACHINE(spapr); 592ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); 59353018216SPaolo Bonzini hwaddr mem_start, node_size; 59453018216SPaolo Bonzini int i, nb_nodes = machine->numa_state->num_nodes; 59553018216SPaolo Bonzini NodeInfo *nodes = machine->numa_state->nodes; 59653018216SPaolo Bonzini 59753018216SPaolo Bonzini for (i = 0, mem_start = 0; i < nb_nodes; ++i) { 59853018216SPaolo Bonzini if (!nodes[i].node_mem) { 59953018216SPaolo Bonzini continue; 60053018216SPaolo Bonzini } 60153018216SPaolo Bonzini if (mem_start >= machine->ram_size) { 60253018216SPaolo Bonzini node_size = 0; 60353018216SPaolo Bonzini } else { 60453018216SPaolo Bonzini node_size = nodes[i].node_mem; 60553018216SPaolo Bonzini if (node_size > machine->ram_size - mem_start) { 60653018216SPaolo Bonzini node_size = machine->ram_size - mem_start; 60753018216SPaolo Bonzini } 60853018216SPaolo Bonzini } 60953018216SPaolo Bonzini if (!mem_start) { 61053018216SPaolo Bonzini /* spapr_machine_init() checks for rma_size <= node0_size 61153018216SPaolo Bonzini * already */ 612f1aa45ffSDaniel Henrique Barboza spapr_dt_memory_node(spapr, fdt, i, 0, spapr->rma_size); 61353018216SPaolo Bonzini mem_start += spapr->rma_size; 61453018216SPaolo Bonzini node_size -= spapr->rma_size; 61553018216SPaolo Bonzini } 61653018216SPaolo Bonzini for ( ; node_size; ) { 61753018216SPaolo Bonzini hwaddr sizetmp = pow2floor(node_size); 61853018216SPaolo Bonzini 61953018216SPaolo Bonzini /* mem_start != 0 here */ 62053018216SPaolo Bonzini if (ctzl(mem_start) < ctzl(sizetmp)) { 62153018216SPaolo Bonzini sizetmp = 1ULL << ctzl(mem_start); 62253018216SPaolo Bonzini } 62353018216SPaolo Bonzini 624f1aa45ffSDaniel Henrique Barboza spapr_dt_memory_node(spapr, fdt, i, mem_start, sizetmp); 62553018216SPaolo Bonzini node_size -= sizetmp; 62653018216SPaolo Bonzini mem_start += sizetmp; 62753018216SPaolo Bonzini } 62853018216SPaolo Bonzini } 62953018216SPaolo Bonzini 6306787d27bSMichael Roth /* Generate ibm,dynamic-reconfiguration-memory node if required */ 631fa523f0dSDavid Gibson if (spapr_ovec_test(spapr->ov5_cas, OV5_DRCONF_MEMORY)) { 632fa523f0dSDavid Gibson int ret; 633fa523f0dSDavid Gibson 6346787d27bSMichael Roth g_assert(smc->dr_lmb_enabled); 63591335a5eSDavid Gibson ret = spapr_dt_dynamic_reconfiguration_memory(spapr, fdt); 636417ece33SMichael Roth if (ret) { 6379b6c1da5SDaniel Henrique Barboza return ret; 638417ece33SMichael Roth } 6396787d27bSMichael Roth } 6406787d27bSMichael Roth 64153018216SPaolo Bonzini return 0; 64253018216SPaolo Bonzini } 64353018216SPaolo Bonzini 64491335a5eSDavid Gibson static void spapr_dt_cpu(CPUState *cs, void *fdt, int offset, 64553018216SPaolo Bonzini SpaprMachineState *spapr) 64653018216SPaolo Bonzini { 64753018216SPaolo Bonzini MachineState *ms = MACHINE(spapr); 64853018216SPaolo Bonzini PowerPCCPU *cpu = POWERPC_CPU(cs); 64953018216SPaolo Bonzini CPUPPCState *env = &cpu->env; 65053018216SPaolo Bonzini PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs); 65153018216SPaolo Bonzini int index = spapr_get_vcpu_id(cpu); 65253018216SPaolo Bonzini uint32_t segs[] = {cpu_to_be32(28), cpu_to_be32(40), 65353018216SPaolo Bonzini 0xffffffff, 0xffffffff}; 65453018216SPaolo Bonzini uint32_t tbfreq = kvm_enabled() ? kvmppc_get_tbfreq() 65553018216SPaolo Bonzini : SPAPR_TIMEBASE_FREQ; 65653018216SPaolo Bonzini uint32_t cpufreq = kvm_enabled() ? kvmppc_get_clockfreq() : 1000000000; 65753018216SPaolo Bonzini uint32_t page_sizes_prop[64]; 65853018216SPaolo Bonzini size_t page_sizes_prop_size; 65953018216SPaolo Bonzini unsigned int smp_threads = ms->smp.threads; 66053018216SPaolo Bonzini uint32_t vcpus_per_socket = smp_threads * ms->smp.cores; 66153018216SPaolo Bonzini uint32_t pft_size_prop[] = {0, cpu_to_be32(spapr->htab_shift)}; 66253018216SPaolo Bonzini int compat_smt = MIN(smp_threads, ppc_compat_max_vthreads(cpu)); 66353018216SPaolo Bonzini SpaprDrc *drc; 66453018216SPaolo Bonzini int drc_index; 66553018216SPaolo Bonzini uint32_t radix_AP_encodings[PPC_PAGE_SIZES_MAX_SZ]; 66653018216SPaolo Bonzini int i; 66753018216SPaolo Bonzini 66853018216SPaolo Bonzini drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU, index); 66953018216SPaolo Bonzini if (drc) { 67053018216SPaolo Bonzini drc_index = spapr_drc_index(drc); 67153018216SPaolo Bonzini _FDT((fdt_setprop_cell(fdt, offset, "ibm,my-drc-index", drc_index))); 6722a6593cbSAlexey Kardashevskiy } 6732a6593cbSAlexey Kardashevskiy 6742a6593cbSAlexey Kardashevskiy _FDT((fdt_setprop_cell(fdt, offset, "reg", index))); 6752a6593cbSAlexey Kardashevskiy _FDT((fdt_setprop_string(fdt, offset, "device_type", "cpu"))); 6762a6593cbSAlexey Kardashevskiy 6772a6593cbSAlexey Kardashevskiy _FDT((fdt_setprop_cell(fdt, offset, "cpu-version", env->spr[SPR_PVR]))); 6782a6593cbSAlexey Kardashevskiy _FDT((fdt_setprop_cell(fdt, offset, "d-cache-block-size", 6792a6593cbSAlexey Kardashevskiy env->dcache_line_size))); 6802a6593cbSAlexey Kardashevskiy _FDT((fdt_setprop_cell(fdt, offset, "d-cache-line-size", 6812a6593cbSAlexey Kardashevskiy env->dcache_line_size))); 6822a6593cbSAlexey Kardashevskiy _FDT((fdt_setprop_cell(fdt, offset, "i-cache-block-size", 6832a6593cbSAlexey Kardashevskiy env->icache_line_size))); 6842a6593cbSAlexey Kardashevskiy _FDT((fdt_setprop_cell(fdt, offset, "i-cache-line-size", 6852a6593cbSAlexey Kardashevskiy env->icache_line_size))); 6862a6593cbSAlexey Kardashevskiy 6872a6593cbSAlexey Kardashevskiy if (pcc->l1_dcache_size) { 6882a6593cbSAlexey Kardashevskiy _FDT((fdt_setprop_cell(fdt, offset, "d-cache-size", 6892a6593cbSAlexey Kardashevskiy pcc->l1_dcache_size))); 6902a6593cbSAlexey Kardashevskiy } else { 6912a6593cbSAlexey Kardashevskiy warn_report("Unknown L1 dcache size for cpu"); 6922a6593cbSAlexey Kardashevskiy } 6932a6593cbSAlexey Kardashevskiy if (pcc->l1_icache_size) { 6942a6593cbSAlexey Kardashevskiy _FDT((fdt_setprop_cell(fdt, offset, "i-cache-size", 6952a6593cbSAlexey Kardashevskiy pcc->l1_icache_size))); 6962a6593cbSAlexey Kardashevskiy } else { 6972a6593cbSAlexey Kardashevskiy warn_report("Unknown L1 icache size for cpu"); 6982a6593cbSAlexey Kardashevskiy } 6992a6593cbSAlexey Kardashevskiy 7002a6593cbSAlexey Kardashevskiy _FDT((fdt_setprop_cell(fdt, offset, "timebase-frequency", tbfreq))); 7012a6593cbSAlexey Kardashevskiy _FDT((fdt_setprop_cell(fdt, offset, "clock-frequency", cpufreq))); 7022a6593cbSAlexey Kardashevskiy _FDT((fdt_setprop_cell(fdt, offset, "slb-size", cpu->hash64_opts->slb_size))); 7032a6593cbSAlexey Kardashevskiy _FDT((fdt_setprop_cell(fdt, offset, "ibm,slb-size", cpu->hash64_opts->slb_size))); 7042a6593cbSAlexey Kardashevskiy _FDT((fdt_setprop_string(fdt, offset, "status", "okay"))); 7052a6593cbSAlexey Kardashevskiy _FDT((fdt_setprop(fdt, offset, "64-bit", NULL, 0))); 7062a6593cbSAlexey Kardashevskiy 70753018216SPaolo Bonzini if (env->spr_cb[SPR_PURR].oea_read) { 70853018216SPaolo Bonzini _FDT((fdt_setprop_cell(fdt, offset, "ibm,purr", 1))); 70953018216SPaolo Bonzini } 71053018216SPaolo Bonzini if (env->spr_cb[SPR_SPURR].oea_read) { 71153018216SPaolo Bonzini _FDT((fdt_setprop_cell(fdt, offset, "ibm,spurr", 1))); 71253018216SPaolo Bonzini } 7135fe269b1SPaul Mackerras 71453018216SPaolo Bonzini if (ppc_hash64_has(cpu, PPC_HASH64_1TSEG)) { 71553018216SPaolo Bonzini _FDT((fdt_setprop(fdt, offset, "ibm,processor-segment-sizes", 71653018216SPaolo Bonzini segs, sizeof(segs)))); 71753018216SPaolo Bonzini } 7185fe269b1SPaul Mackerras 7195fe269b1SPaul Mackerras /* Advertise VSX (vector extensions) if available 7205fe269b1SPaul Mackerras * 1 == VMX / Altivec available 7215fe269b1SPaul Mackerras * 2 == VSX available 7225fe269b1SPaul Mackerras * 72353018216SPaolo Bonzini * Only CPUs for which we create core types in spapr_cpu_core.c 72453018216SPaolo Bonzini * are possible, and all of those have VMX */ 72553018216SPaolo Bonzini if (spapr_get_cap(spapr, SPAPR_CAP_VSX) != 0) { 72653018216SPaolo Bonzini _FDT((fdt_setprop_cell(fdt, offset, "ibm,vmx", 2))); 72753018216SPaolo Bonzini } else { 72853018216SPaolo Bonzini _FDT((fdt_setprop_cell(fdt, offset, "ibm,vmx", 1))); 72928e02042SDavid Gibson } 73053018216SPaolo Bonzini 731fb164994SDavid Gibson /* Advertise DFP (Decimal Floating Point) if available 7327db8a127SAlexey Kardashevskiy * 0 / no property == no DFP 7337db8a127SAlexey Kardashevskiy * 1 == DFP available */ 7347db8a127SAlexey Kardashevskiy if (spapr_get_cap(spapr, SPAPR_CAP_DFP) != 0) { 7357db8a127SAlexey Kardashevskiy _FDT((fdt_setprop_cell(fdt, offset, "ibm,dfp", 1))); 73653018216SPaolo Bonzini } 7377db8a127SAlexey Kardashevskiy 7387db8a127SAlexey Kardashevskiy page_sizes_prop_size = ppc_create_page_sizes_prop(cpu, page_sizes_prop, 7397db8a127SAlexey Kardashevskiy sizeof(page_sizes_prop)); 740fb164994SDavid Gibson if (page_sizes_prop_size) { 7417db8a127SAlexey Kardashevskiy _FDT((fdt_setprop(fdt, offset, "ibm,segment-page-sizes", 74253018216SPaolo Bonzini page_sizes_prop, page_sizes_prop_size))); 74353018216SPaolo Bonzini } 7447db8a127SAlexey Kardashevskiy 74591335a5eSDavid Gibson spapr_dt_pa_features(spapr, cpu, fdt, offset); 74653018216SPaolo Bonzini 7477db8a127SAlexey Kardashevskiy _FDT((fdt_setprop_cell(fdt, offset, "ibm,chip-id", 7487db8a127SAlexey Kardashevskiy cs->cpu_index / vcpus_per_socket))); 7497db8a127SAlexey Kardashevskiy 75053018216SPaolo Bonzini _FDT((fdt_setprop(fdt, offset, "ibm,pft-size", 751fb164994SDavid Gibson pft_size_prop, sizeof(pft_size_prop)))); 7525fe269b1SPaul Mackerras 7535fe269b1SPaul Mackerras if (ms->numa_state->num_nodes > 1) { 7548f86a408SDaniel Henrique Barboza _FDT(spapr_numa_fixup_cpu_dt(spapr, fdt, offset, cpu)); 7555fe269b1SPaul Mackerras } 7565fe269b1SPaul Mackerras 7577db8a127SAlexey Kardashevskiy _FDT(spapr_fixup_cpu_smt_dt(fdt, offset, cpu, compat_smt)); 7587db8a127SAlexey Kardashevskiy 7597db8a127SAlexey Kardashevskiy if (pcc->radix_page_info) { 7607db8a127SAlexey Kardashevskiy for (i = 0; i < pcc->radix_page_info->count; i++) { 7617db8a127SAlexey Kardashevskiy radix_AP_encodings[i] = 7627db8a127SAlexey Kardashevskiy cpu_to_be32(pcc->radix_page_info->entries[i]); 7636010818cSAlexey Kardashevskiy } 7646010818cSAlexey Kardashevskiy _FDT((fdt_setprop(fdt, offset, "ibm,processor-radix-AP-encodings", 7656010818cSAlexey Kardashevskiy radix_AP_encodings, 7666010818cSAlexey Kardashevskiy pcc->radix_page_info->count * 7676010818cSAlexey Kardashevskiy sizeof(radix_AP_encodings[0])))); 7686010818cSAlexey Kardashevskiy } 7696010818cSAlexey Kardashevskiy 7706010818cSAlexey Kardashevskiy /* 7716010818cSAlexey Kardashevskiy * We set this property to let the guest know that it can use the large 7726010818cSAlexey Kardashevskiy * decrementer and its width in bits. 7736010818cSAlexey Kardashevskiy */ 7746010818cSAlexey Kardashevskiy if (spapr_get_cap(spapr, SPAPR_CAP_LARGE_DECREMENTER) != SPAPR_CAP_OFF) 77553018216SPaolo Bonzini _FDT((fdt_setprop_u32(fdt, offset, "ibm,dec-bits", 77653018216SPaolo Bonzini pcc->lrg_decr_bits))); 77753018216SPaolo Bonzini } 77853018216SPaolo Bonzini 77991335a5eSDavid Gibson static void spapr_dt_cpus(void *fdt, SpaprMachineState *spapr) 78053018216SPaolo Bonzini { 78153018216SPaolo Bonzini CPUState **rev; 78253018216SPaolo Bonzini CPUState *cs; 78353018216SPaolo Bonzini int n_cpus; 78453018216SPaolo Bonzini int cpus_offset; 78553018216SPaolo Bonzini int i; 78653018216SPaolo Bonzini 78753018216SPaolo Bonzini cpus_offset = fdt_add_subnode(fdt, 0, "cpus"); 78853018216SPaolo Bonzini _FDT(cpus_offset); 78953018216SPaolo Bonzini _FDT((fdt_setprop_cell(fdt, cpus_offset, "#address-cells", 0x1))); 79053018216SPaolo Bonzini _FDT((fdt_setprop_cell(fdt, cpus_offset, "#size-cells", 0x0))); 79153018216SPaolo Bonzini 79253018216SPaolo Bonzini /* 79353018216SPaolo Bonzini * We walk the CPUs in reverse order to ensure that CPU DT nodes 79453018216SPaolo Bonzini * created by fdt_add_subnode() end up in the right order in FDT 79553018216SPaolo Bonzini * for the guest kernel the enumerate the CPUs correctly. 79653018216SPaolo Bonzini * 79753018216SPaolo Bonzini * The CPU list cannot be traversed in reverse order, so we need 79853018216SPaolo Bonzini * to do extra work. 79953018216SPaolo Bonzini */ 80053018216SPaolo Bonzini n_cpus = 0; 80153018216SPaolo Bonzini rev = NULL; 80253018216SPaolo Bonzini CPU_FOREACH(cs) { 80353018216SPaolo Bonzini rev = g_renew(CPUState *, rev, n_cpus + 1); 80453018216SPaolo Bonzini rev[n_cpus++] = cs; 80553018216SPaolo Bonzini } 80653018216SPaolo Bonzini 80753018216SPaolo Bonzini for (i = n_cpus - 1; i >= 0; i--) { 80853018216SPaolo Bonzini CPUState *cs = rev[i]; 80953018216SPaolo Bonzini PowerPCCPU *cpu = POWERPC_CPU(cs); 8100da6f3feSBharata B Rao int index = spapr_get_vcpu_id(cpu); 8110da6f3feSBharata B Rao DeviceClass *dc = DEVICE_GET_CLASS(cs); 8127265bc3eSDaniel Henrique Barboza g_autofree char *nodename = NULL; 81353018216SPaolo Bonzini int offset; 81453018216SPaolo Bonzini 8150da6f3feSBharata B Rao if (!spapr_is_thread0_in_vcore(spapr, cpu)) { 8160da6f3feSBharata B Rao continue; 8170da6f3feSBharata B Rao } 8180da6f3feSBharata B Rao 8190da6f3feSBharata B Rao nodename = g_strdup_printf("%s@%x", dc->fw_name, index); 8200da6f3feSBharata B Rao offset = fdt_add_subnode(fdt, cpus_offset, nodename); 8210da6f3feSBharata B Rao _FDT(offset); 82291335a5eSDavid Gibson spapr_dt_cpu(cs, fdt, offset, spapr); 8230da6f3feSBharata B Rao } 8240da6f3feSBharata B Rao 8250da6f3feSBharata B Rao g_free(rev); 8260da6f3feSBharata B Rao } 82722419c2aSDavid Gibson 82891335a5eSDavid Gibson static int spapr_dt_rng(void *fdt) 8290da6f3feSBharata B Rao { 8300da6f3feSBharata B Rao int node; 8310da6f3feSBharata B Rao int ret; 8320da6f3feSBharata B Rao 8330da6f3feSBharata B Rao node = qemu_fdt_add_subnode(fdt, "/ibm,platform-facilities"); 8340da6f3feSBharata B Rao if (node <= 0) { 8350da6f3feSBharata B Rao return -1; 8360da6f3feSBharata B Rao } 8370da6f3feSBharata B Rao ret = fdt_setprop_string(fdt, node, "device_type", 8380da6f3feSBharata B Rao "ibm,platform-facilities"); 8390da6f3feSBharata B Rao ret |= fdt_setprop_cell(fdt, node, "#address-cells", 0x1); 8400da6f3feSBharata B Rao ret |= fdt_setprop_cell(fdt, node, "#size-cells", 0x0); 8410da6f3feSBharata B Rao 8420da6f3feSBharata B Rao node = fdt_add_subnode(fdt, node, "ibm,random-v1"); 8430da6f3feSBharata B Rao if (node <= 0) { 8440da6f3feSBharata B Rao return -1; 8450da6f3feSBharata B Rao } 8460da6f3feSBharata B Rao ret |= fdt_setprop_string(fdt, node, "compatible", "ibm,random"); 8470da6f3feSBharata B Rao 8480da6f3feSBharata B Rao return ret ? -1 : 0; 8490da6f3feSBharata B Rao } 8500da6f3feSBharata B Rao 851ce2918cbSDavid Gibson static void spapr_dt_rtas(SpaprMachineState *spapr, void *fdt) 8523f5dabceSDavid Gibson { 853fe6b6346SLike Xu MachineState *ms = MACHINE(spapr); 8543f5dabceSDavid Gibson int rtas; 8553f5dabceSDavid Gibson GString *hypertas = g_string_sized_new(256); 8563f5dabceSDavid Gibson GString *qemu_hypertas = g_string_sized_new(256); 8570c9269a5SDavid Hildenbrand uint64_t max_device_addr = MACHINE(spapr)->device_memory->base + 858b0c14ec4SDavid Hildenbrand memory_region_size(&MACHINE(spapr)->device_memory->mr); 8593f5dabceSDavid Gibson uint32_t lrdr_capacity[] = { 8600c9269a5SDavid Hildenbrand cpu_to_be32(max_device_addr >> 32), 8610c9269a5SDavid Hildenbrand cpu_to_be32(max_device_addr & 0xffffffff), 8627abf9797SAnton Blanchard cpu_to_be32(SPAPR_MEMORY_BLOCK_SIZE >> 32), 8637abf9797SAnton Blanchard cpu_to_be32(SPAPR_MEMORY_BLOCK_SIZE & 0xffffffff), 864fe6b6346SLike Xu cpu_to_be32(ms->smp.max_cpus / ms->smp.threads), 8653f5dabceSDavid Gibson }; 8663f5dabceSDavid Gibson 8673f5dabceSDavid Gibson _FDT(rtas = fdt_add_subnode(fdt, 0, "rtas")); 8683f5dabceSDavid Gibson 8693f5dabceSDavid Gibson /* hypertas */ 8703f5dabceSDavid Gibson add_str(hypertas, "hcall-pft"); 8713f5dabceSDavid Gibson add_str(hypertas, "hcall-term"); 8723f5dabceSDavid Gibson add_str(hypertas, "hcall-dabr"); 8733f5dabceSDavid Gibson add_str(hypertas, "hcall-interrupt"); 8743f5dabceSDavid Gibson add_str(hypertas, "hcall-tce"); 8753f5dabceSDavid Gibson add_str(hypertas, "hcall-vio"); 8763f5dabceSDavid Gibson add_str(hypertas, "hcall-splpar"); 87710741314SNicholas Piggin add_str(hypertas, "hcall-join"); 8783f5dabceSDavid Gibson add_str(hypertas, "hcall-bulk"); 8793f5dabceSDavid Gibson add_str(hypertas, "hcall-set-mode"); 8803f5dabceSDavid Gibson add_str(hypertas, "hcall-sprg0"); 8813f5dabceSDavid Gibson add_str(hypertas, "hcall-copy"); 8823f5dabceSDavid Gibson add_str(hypertas, "hcall-debug"); 883c24ba3d0SLaurent Vivier add_str(hypertas, "hcall-vphn"); 8843f5dabceSDavid Gibson add_str(qemu_hypertas, "hcall-memop1"); 8853f5dabceSDavid Gibson 8863f5dabceSDavid Gibson if (!kvm_enabled() || kvmppc_spapr_use_multitce()) { 8873f5dabceSDavid Gibson add_str(hypertas, "hcall-multi-tce"); 8883f5dabceSDavid Gibson } 88930f4b05bSDavid Gibson 89030f4b05bSDavid Gibson if (spapr->resize_hpt != SPAPR_RESIZE_HPT_DISABLED) { 89130f4b05bSDavid Gibson add_str(hypertas, "hcall-hpt-resize"); 89230f4b05bSDavid Gibson } 89330f4b05bSDavid Gibson 8943f5dabceSDavid Gibson _FDT(fdt_setprop(fdt, rtas, "ibm,hypertas-functions", 8953f5dabceSDavid Gibson hypertas->str, hypertas->len)); 8963f5dabceSDavid Gibson g_string_free(hypertas, TRUE); 8973f5dabceSDavid Gibson _FDT(fdt_setprop(fdt, rtas, "qemu,hypertas-functions", 8983f5dabceSDavid Gibson qemu_hypertas->str, qemu_hypertas->len)); 8993f5dabceSDavid Gibson g_string_free(qemu_hypertas, TRUE); 9003f5dabceSDavid Gibson 9011eee9950SDaniel Henrique Barboza spapr_numa_write_rtas_dt(spapr, fdt, rtas); 902da9f80fbSSerhii Popovych 9030e236d34SNicholas Piggin /* 9040e236d34SNicholas Piggin * FWNMI reserves RTAS_ERROR_LOG_MAX for the machine check error log, 9050e236d34SNicholas Piggin * and 16 bytes per CPU for system reset error log plus an extra 8 bytes. 9060e236d34SNicholas Piggin * 9070e236d34SNicholas Piggin * The system reset requirements are driven by existing Linux and PowerVM 9080e236d34SNicholas Piggin * implementation which (contrary to PAPR) saves r3 in the error log 9090e236d34SNicholas Piggin * structure like machine check, so Linux expects to find the saved r3 9100e236d34SNicholas Piggin * value at the address in r3 upon FWNMI-enabled sreset interrupt (and 9110e236d34SNicholas Piggin * does not look at the error value). 9120e236d34SNicholas Piggin * 9130e236d34SNicholas Piggin * System reset interrupts are not subject to interlock like machine 9140e236d34SNicholas Piggin * check, so this memory area could be corrupted if the sreset is 9150e236d34SNicholas Piggin * interrupted by a machine check (or vice versa) if it was shared. To 9160e236d34SNicholas Piggin * prevent this, system reset uses per-CPU areas for the sreset save 9170e236d34SNicholas Piggin * area. A system reset that interrupts a system reset handler could 9180e236d34SNicholas Piggin * still overwrite this area, but Linux doesn't try to recover in that 9190e236d34SNicholas Piggin * case anyway. 9200e236d34SNicholas Piggin * 9210e236d34SNicholas Piggin * The extra 8 bytes is required because Linux's FWNMI error log check 9220e236d34SNicholas Piggin * is off-by-one. 9230e236d34SNicholas Piggin */ 9240e236d34SNicholas Piggin _FDT(fdt_setprop_cell(fdt, rtas, "rtas-size", RTAS_ERROR_LOG_MAX + 9250e236d34SNicholas Piggin ms->smp.max_cpus * sizeof(uint64_t)*2 + sizeof(uint64_t))); 9263f5dabceSDavid Gibson _FDT(fdt_setprop_cell(fdt, rtas, "rtas-error-log-max", 9273f5dabceSDavid Gibson RTAS_ERROR_LOG_MAX)); 9283f5dabceSDavid Gibson _FDT(fdt_setprop_cell(fdt, rtas, "rtas-event-scan-rate", 9293f5dabceSDavid Gibson RTAS_EVENT_SCAN_RATE)); 9303f5dabceSDavid Gibson 9314f441474SDavid Gibson g_assert(msi_nonbroken); 9323f5dabceSDavid Gibson _FDT(fdt_setprop(fdt, rtas, "ibm,change-msix-capable", NULL, 0)); 9333f5dabceSDavid Gibson 9343f5dabceSDavid Gibson /* 9353f5dabceSDavid Gibson * According to PAPR, rtas ibm,os-term does not guarantee a return 9363f5dabceSDavid Gibson * back to the guest cpu. 9373f5dabceSDavid Gibson * 9383f5dabceSDavid Gibson * While an additional ibm,extended-os-term property indicates 9393f5dabceSDavid Gibson * that rtas call return will always occur. Set this property. 9403f5dabceSDavid Gibson */ 9413f5dabceSDavid Gibson _FDT(fdt_setprop(fdt, rtas, "ibm,extended-os-term", NULL, 0)); 9423f5dabceSDavid Gibson 9433f5dabceSDavid Gibson _FDT(fdt_setprop(fdt, rtas, "ibm,lrdr-capacity", 9443f5dabceSDavid Gibson lrdr_capacity, sizeof(lrdr_capacity))); 9453f5dabceSDavid Gibson 9463f5dabceSDavid Gibson spapr_dt_rtas_tokens(fdt, rtas); 9473f5dabceSDavid Gibson } 9483f5dabceSDavid Gibson 949db592b5bSCédric Le Goater /* 950db592b5bSCédric Le Goater * Prepare ibm,arch-vec-5-platform-support, which indicates the MMU 951db592b5bSCédric Le Goater * and the XIVE features that the guest may request and thus the valid 952db592b5bSCédric Le Goater * values for bytes 23..26 of option vector 5: 953db592b5bSCédric Le Goater */ 954ce2918cbSDavid Gibson static void spapr_dt_ov5_platform_support(SpaprMachineState *spapr, void *fdt, 955db592b5bSCédric Le Goater int chosen) 9569fb4541fSSam Bobroff { 957545d6e2bSSuraj Jitindar Singh PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu); 958545d6e2bSSuraj Jitindar Singh 959f2b14e3aSCédric Le Goater char val[2 * 4] = { 960ca62823bSDavid Gibson 23, 0x00, /* XICS / XIVE mode */ 9619fb4541fSSam Bobroff 24, 0x00, /* Hash/Radix, filled in below. */ 9629fb4541fSSam Bobroff 25, 0x00, /* Hash options: Segment Tables == no, GTSE == no. */ 9639fb4541fSSam Bobroff 26, 0x40, /* Radix options: GTSE == yes. */ 9649fb4541fSSam Bobroff }; 9659fb4541fSSam Bobroff 966ca62823bSDavid Gibson if (spapr->irq->xics && spapr->irq->xive) { 967ca62823bSDavid Gibson val[1] = SPAPR_OV5_XIVE_BOTH; 968ca62823bSDavid Gibson } else if (spapr->irq->xive) { 969ca62823bSDavid Gibson val[1] = SPAPR_OV5_XIVE_EXPLOIT; 970ca62823bSDavid Gibson } else { 971ca62823bSDavid Gibson assert(spapr->irq->xics); 972ca62823bSDavid Gibson val[1] = SPAPR_OV5_XIVE_LEGACY; 973ca62823bSDavid Gibson } 974ca62823bSDavid Gibson 9757abd43baSSuraj Jitindar Singh if (!ppc_check_compat(first_ppc_cpu, CPU_POWERPC_LOGICAL_3_00, 0, 9767abd43baSSuraj Jitindar Singh first_ppc_cpu->compat_pvr)) { 977db592b5bSCédric Le Goater /* 978db592b5bSCédric Le Goater * If we're in a pre POWER9 compat mode then the guest should 979db592b5bSCédric Le Goater * do hash and use the legacy interrupt mode 980db592b5bSCédric Le Goater */ 981ca62823bSDavid Gibson val[1] = SPAPR_OV5_XIVE_LEGACY; /* XICS */ 9827abd43baSSuraj Jitindar Singh val[3] = 0x00; /* Hash */ 9837abd43baSSuraj Jitindar Singh } else if (kvm_enabled()) { 9849fb4541fSSam Bobroff if (kvmppc_has_cap_mmu_radix() && kvmppc_has_cap_mmu_hash_v3()) { 985f2b14e3aSCédric Le Goater val[3] = 0x80; /* OV5_MMU_BOTH */ 9869fb4541fSSam Bobroff } else if (kvmppc_has_cap_mmu_radix()) { 987f2b14e3aSCédric Le Goater val[3] = 0x40; /* OV5_MMU_RADIX_300 */ 9889fb4541fSSam Bobroff } else { 989f2b14e3aSCédric Le Goater val[3] = 0x00; /* Hash */ 9909fb4541fSSam Bobroff } 9919fb4541fSSam Bobroff } else { 9927abd43baSSuraj Jitindar Singh /* V3 MMU supports both hash and radix in tcg (with dynamic switching) */ 993f2b14e3aSCédric Le Goater val[3] = 0xC0; 994545d6e2bSSuraj Jitindar Singh } 9959fb4541fSSam Bobroff _FDT(fdt_setprop(fdt, chosen, "ibm,arch-vec-5-platform-support", 9969fb4541fSSam Bobroff val, sizeof(val))); 9979fb4541fSSam Bobroff } 9989fb4541fSSam Bobroff 9991e0e1108SDavid Gibson static void spapr_dt_chosen(SpaprMachineState *spapr, void *fdt, bool reset) 10007c866c6aSDavid Gibson { 10017c866c6aSDavid Gibson MachineState *machine = MACHINE(spapr); 10026c3829a2SAlexey Kardashevskiy SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine); 10037c866c6aSDavid Gibson int chosen; 10041e0e1108SDavid Gibson 10051e0e1108SDavid Gibson _FDT(chosen = fdt_add_subnode(fdt, 0, "chosen")); 10061e0e1108SDavid Gibson 10071e0e1108SDavid Gibson if (reset) { 10087c866c6aSDavid Gibson const char *boot_device = machine->boot_order; 10097c866c6aSDavid Gibson char *stdout_path = spapr_vio_stdout_path(spapr->vio_bus); 10107c866c6aSDavid Gibson size_t cb = 0; 1011907aac2fSMark Cave-Ayland char *bootlist = get_boot_devices_list(&cb); 10127c866c6aSDavid Gibson 10135ced7895SAlexey Kardashevskiy if (machine->kernel_cmdline && machine->kernel_cmdline[0]) { 10145ced7895SAlexey Kardashevskiy _FDT(fdt_setprop_string(fdt, chosen, "bootargs", 10155ced7895SAlexey Kardashevskiy machine->kernel_cmdline)); 10165ced7895SAlexey Kardashevskiy } 10171e0e1108SDavid Gibson 10185ced7895SAlexey Kardashevskiy if (spapr->initrd_size) { 10197c866c6aSDavid Gibson _FDT(fdt_setprop_cell(fdt, chosen, "linux,initrd-start", 10207c866c6aSDavid Gibson spapr->initrd_base)); 10217c866c6aSDavid Gibson _FDT(fdt_setprop_cell(fdt, chosen, "linux,initrd-end", 10227c866c6aSDavid Gibson spapr->initrd_base + spapr->initrd_size)); 10235ced7895SAlexey Kardashevskiy } 10247c866c6aSDavid Gibson 10257c866c6aSDavid Gibson if (spapr->kernel_size) { 102687262806SAlexey Kardashevskiy uint64_t kprop[2] = { cpu_to_be64(spapr->kernel_addr), 10277c866c6aSDavid Gibson cpu_to_be64(spapr->kernel_size) }; 10287c866c6aSDavid Gibson 10297c866c6aSDavid Gibson _FDT(fdt_setprop(fdt, chosen, "qemu,boot-kernel", 10307c866c6aSDavid Gibson &kprop, sizeof(kprop))); 10317c866c6aSDavid Gibson if (spapr->kernel_le) { 10327c866c6aSDavid Gibson _FDT(fdt_setprop(fdt, chosen, "qemu,boot-kernel-le", NULL, 0)); 10337c866c6aSDavid Gibson } 10347c866c6aSDavid Gibson } 10357c866c6aSDavid Gibson if (boot_menu) { 10367c866c6aSDavid Gibson _FDT((fdt_setprop_cell(fdt, chosen, "qemu,boot-menu", boot_menu))); 10377c866c6aSDavid Gibson } 10387c866c6aSDavid Gibson _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-width", graphic_width)); 10397c866c6aSDavid Gibson _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-height", graphic_height)); 10407c866c6aSDavid Gibson _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-depth", graphic_depth)); 10417c866c6aSDavid Gibson 10427c866c6aSDavid Gibson if (cb && bootlist) { 10437c866c6aSDavid Gibson int i; 10447c866c6aSDavid Gibson 10457c866c6aSDavid Gibson for (i = 0; i < cb; i++) { 10467c866c6aSDavid Gibson if (bootlist[i] == '\n') { 10477c866c6aSDavid Gibson bootlist[i] = ' '; 10487c866c6aSDavid Gibson } 10497c866c6aSDavid Gibson } 10507c866c6aSDavid Gibson _FDT(fdt_setprop_string(fdt, chosen, "qemu,boot-list", bootlist)); 10517c866c6aSDavid Gibson } 10527c866c6aSDavid Gibson 10537c866c6aSDavid Gibson if (boot_device && strlen(boot_device)) { 10547c866c6aSDavid Gibson _FDT(fdt_setprop_string(fdt, chosen, "qemu,boot-device", boot_device)); 10557c866c6aSDavid Gibson } 10567c866c6aSDavid Gibson 10577c866c6aSDavid Gibson if (!spapr->has_graphics && stdout_path) { 105890ee4e01SNikunj A Dadhania /* 10591e0e1108SDavid Gibson * "linux,stdout-path" and "stdout" properties are 10601e0e1108SDavid Gibson * deprecated by linux kernel. New platforms should only 10611e0e1108SDavid Gibson * use the "stdout-path" property. Set the new property 10621e0e1108SDavid Gibson * and continue using older property to remain compatible 10631e0e1108SDavid Gibson * with the existing firmware. 106490ee4e01SNikunj A Dadhania */ 10657c866c6aSDavid Gibson _FDT(fdt_setprop_string(fdt, chosen, "linux,stdout-path", stdout_path)); 106690ee4e01SNikunj A Dadhania _FDT(fdt_setprop_string(fdt, chosen, "stdout-path", stdout_path)); 10677c866c6aSDavid Gibson } 10687c866c6aSDavid Gibson 10691e0e1108SDavid Gibson /* 10701e0e1108SDavid Gibson * We can deal with BAR reallocation just fine, advertise it 10711e0e1108SDavid Gibson * to the guest 10721e0e1108SDavid Gibson */ 10736c3829a2SAlexey Kardashevskiy if (smc->linux_pci_probe) { 10746c3829a2SAlexey Kardashevskiy _FDT(fdt_setprop_cell(fdt, chosen, "linux,pci-probe-only", 0)); 10756c3829a2SAlexey Kardashevskiy } 10766c3829a2SAlexey Kardashevskiy 1077db592b5bSCédric Le Goater spapr_dt_ov5_platform_support(spapr, fdt, chosen); 10789fb4541fSSam Bobroff 10797c866c6aSDavid Gibson g_free(stdout_path); 10807c866c6aSDavid Gibson g_free(bootlist); 10817c866c6aSDavid Gibson } 10827c866c6aSDavid Gibson 108391335a5eSDavid Gibson _FDT(spapr_dt_ovec(fdt, chosen, spapr->ov5_cas, "ibm,architecture-vec-5")); 10841e0e1108SDavid Gibson } 10851e0e1108SDavid Gibson 1086ce2918cbSDavid Gibson static void spapr_dt_hypervisor(SpaprMachineState *spapr, void *fdt) 1087fca5f2dcSDavid Gibson { 1088fca5f2dcSDavid Gibson /* The /hypervisor node isn't in PAPR - this is a hack to allow PR 1089fca5f2dcSDavid Gibson * KVM to work under pHyp with some guest co-operation */ 1090fca5f2dcSDavid Gibson int hypervisor; 1091fca5f2dcSDavid Gibson uint8_t hypercall[16]; 1092fca5f2dcSDavid Gibson 1093fca5f2dcSDavid Gibson _FDT(hypervisor = fdt_add_subnode(fdt, 0, "hypervisor")); 1094fca5f2dcSDavid Gibson /* indicate KVM hypercall interface */ 1095fca5f2dcSDavid Gibson _FDT(fdt_setprop_string(fdt, hypervisor, "compatible", "linux,kvm")); 1096fca5f2dcSDavid Gibson if (kvmppc_has_cap_fixup_hcalls()) { 1097fca5f2dcSDavid Gibson /* 1098fca5f2dcSDavid Gibson * Older KVM versions with older guest kernels were broken 1099fca5f2dcSDavid Gibson * with the magic page, don't allow the guest to map it. 1100fca5f2dcSDavid Gibson */ 1101fca5f2dcSDavid Gibson if (!kvmppc_get_hypercall(first_cpu->env_ptr, hypercall, 1102fca5f2dcSDavid Gibson sizeof(hypercall))) { 1103fca5f2dcSDavid Gibson _FDT(fdt_setprop(fdt, hypervisor, "hcall-instructions", 1104fca5f2dcSDavid Gibson hypercall, sizeof(hypercall))); 1105fca5f2dcSDavid Gibson } 1106fca5f2dcSDavid Gibson } 1107fca5f2dcSDavid Gibson } 1108fca5f2dcSDavid Gibson 11090c21e073SDavid Gibson void *spapr_build_fdt(SpaprMachineState *spapr, bool reset, size_t space) 111053018216SPaolo Bonzini { 1111c86c1affSDaniel Henrique Barboza MachineState *machine = MACHINE(spapr); 11123c0c47e3SDavid Gibson MachineClass *mc = MACHINE_GET_CLASS(machine); 1113ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine); 1114776e887fSGreg Kurz uint32_t root_drc_type_mask = 0; 11157c866c6aSDavid Gibson int ret; 111653018216SPaolo Bonzini void *fdt; 1117ce2918cbSDavid Gibson SpaprPhbState *phb; 1118398a0bd5SDavid Gibson char *buf; 111953018216SPaolo Bonzini 112097b32a6aSDavid Gibson fdt = g_malloc0(space); 112197b32a6aSDavid Gibson _FDT((fdt_create_empty_tree(fdt, space))); 112253018216SPaolo Bonzini 1123398a0bd5SDavid Gibson /* Root node */ 1124398a0bd5SDavid Gibson _FDT(fdt_setprop_string(fdt, 0, "device_type", "chrp")); 1125398a0bd5SDavid Gibson _FDT(fdt_setprop_string(fdt, 0, "model", "IBM pSeries (emulated by qemu)")); 1126398a0bd5SDavid Gibson _FDT(fdt_setprop_string(fdt, 0, "compatible", "qemu,pseries")); 1127398a0bd5SDavid Gibson 11280a794529SDavid Gibson /* Guest UUID & Name*/ 1129398a0bd5SDavid Gibson buf = qemu_uuid_unparse_strdup(&qemu_uuid); 1130398a0bd5SDavid Gibson _FDT(fdt_setprop_string(fdt, 0, "vm,uuid", buf)); 1131398a0bd5SDavid Gibson if (qemu_uuid_set) { 1132398a0bd5SDavid Gibson _FDT(fdt_setprop_string(fdt, 0, "system-id", buf)); 1133398a0bd5SDavid Gibson } 1134398a0bd5SDavid Gibson g_free(buf); 1135398a0bd5SDavid Gibson 1136398a0bd5SDavid Gibson if (qemu_get_vm_name()) { 1137398a0bd5SDavid Gibson _FDT(fdt_setprop_string(fdt, 0, "ibm,partition-name", 1138398a0bd5SDavid Gibson qemu_get_vm_name())); 1139398a0bd5SDavid Gibson } 1140398a0bd5SDavid Gibson 11410a794529SDavid Gibson /* Host Model & Serial Number */ 11420a794529SDavid Gibson if (spapr->host_model) { 11430a794529SDavid Gibson _FDT(fdt_setprop_string(fdt, 0, "host-model", spapr->host_model)); 11440a794529SDavid Gibson } else if (smc->broken_host_serial_model && kvmppc_get_host_model(&buf)) { 11450a794529SDavid Gibson _FDT(fdt_setprop_string(fdt, 0, "host-model", buf)); 11460a794529SDavid Gibson g_free(buf); 11470a794529SDavid Gibson } 11480a794529SDavid Gibson 11490a794529SDavid Gibson if (spapr->host_serial) { 11500a794529SDavid Gibson _FDT(fdt_setprop_string(fdt, 0, "host-serial", spapr->host_serial)); 11510a794529SDavid Gibson } else if (smc->broken_host_serial_model && kvmppc_get_host_serial(&buf)) { 11520a794529SDavid Gibson _FDT(fdt_setprop_string(fdt, 0, "host-serial", buf)); 11530a794529SDavid Gibson g_free(buf); 11540a794529SDavid Gibson } 11550a794529SDavid Gibson 1156398a0bd5SDavid Gibson _FDT(fdt_setprop_cell(fdt, 0, "#address-cells", 2)); 1157398a0bd5SDavid Gibson _FDT(fdt_setprop_cell(fdt, 0, "#size-cells", 2)); 115853018216SPaolo Bonzini 1159fc7e0765SDavid Gibson /* /interrupt controller */ 116005289273SDavid Gibson spapr_irq_dt(spapr, spapr_max_server_number(spapr), fdt, PHANDLE_INTC); 1161fc7e0765SDavid Gibson 116291335a5eSDavid Gibson ret = spapr_dt_memory(spapr, fdt); 1163e8f986fcSBharata B Rao if (ret < 0) { 1164ce9863b7SCédric Le Goater error_report("couldn't setup memory nodes in fdt"); 1165e8f986fcSBharata B Rao exit(1); 116653018216SPaolo Bonzini } 116753018216SPaolo Bonzini 1168bf5a6696SDavid Gibson /* /vdevice */ 1169bf5a6696SDavid Gibson spapr_dt_vdevice(spapr->vio_bus, fdt); 117053018216SPaolo Bonzini 11714d9392beSThomas Huth if (object_resolve_path_type("", TYPE_SPAPR_RNG, NULL)) { 117291335a5eSDavid Gibson ret = spapr_dt_rng(fdt); 11734d9392beSThomas Huth if (ret < 0) { 1174ce9863b7SCédric Le Goater error_report("could not set up rng device in the fdt"); 11754d9392beSThomas Huth exit(1); 11764d9392beSThomas Huth } 11774d9392beSThomas Huth } 11784d9392beSThomas Huth 117953018216SPaolo Bonzini QLIST_FOREACH(phb, &spapr->phbs, list) { 11808cbe71ecSDavid Gibson ret = spapr_dt_phb(spapr, phb, PHANDLE_INTC, fdt, NULL); 118153018216SPaolo Bonzini if (ret < 0) { 1182da34fed7SThomas Huth error_report("couldn't setup PCI devices in fdt"); 118353018216SPaolo Bonzini exit(1); 118453018216SPaolo Bonzini } 1185da34fed7SThomas Huth } 118653018216SPaolo Bonzini 118791335a5eSDavid Gibson spapr_dt_cpus(fdt, spapr); 118853018216SPaolo Bonzini 1189776e887fSGreg Kurz /* ibm,drc-indexes and friends */ 1190c20d332aSBharata B Rao if (smc->dr_lmb_enabled) { 1191776e887fSGreg Kurz root_drc_type_mask |= SPAPR_DR_CONNECTOR_TYPE_LMB; 1192776e887fSGreg Kurz } 1193776e887fSGreg Kurz if (smc->dr_phb_enabled) { 1194776e887fSGreg Kurz root_drc_type_mask |= SPAPR_DR_CONNECTOR_TYPE_PHB; 1195776e887fSGreg Kurz } 1196776e887fSGreg Kurz if (mc->nvdimm_supported) { 1197776e887fSGreg Kurz root_drc_type_mask |= SPAPR_DR_CONNECTOR_TYPE_PMEM; 1198776e887fSGreg Kurz } 1199776e887fSGreg Kurz if (root_drc_type_mask) { 1200776e887fSGreg Kurz _FDT(spapr_dt_drc(fdt, 0, NULL, root_drc_type_mask)); 1201c20d332aSBharata B Rao } 1202c20d332aSBharata B Rao 1203c5514d0eSIgor Mammedov if (mc->has_hotpluggable_cpus) { 1204af81cf32SBharata B Rao int offset = fdt_path_offset(fdt, "/cpus"); 12059e7d38e8SDavid Gibson ret = spapr_dt_drc(fdt, offset, NULL, SPAPR_DR_CONNECTOR_TYPE_CPU); 1206af81cf32SBharata B Rao if (ret < 0) { 1207af81cf32SBharata B Rao error_report("Couldn't set up CPU DR device tree properties"); 1208af81cf32SBharata B Rao exit(1); 1209af81cf32SBharata B Rao } 1210af81cf32SBharata B Rao } 1211af81cf32SBharata B Rao 1212ffb1e275SDavid Gibson /* /event-sources */ 1213ffbb1705SMichael Roth spapr_dt_events(spapr, fdt); 1214ffb1e275SDavid Gibson 12153f5dabceSDavid Gibson /* /rtas */ 12163f5dabceSDavid Gibson spapr_dt_rtas(spapr, fdt); 12173f5dabceSDavid Gibson 12187c866c6aSDavid Gibson /* /chosen */ 12191e0e1108SDavid Gibson spapr_dt_chosen(spapr, fdt, reset); 1220cf6e5223SDavid Gibson 1221fca5f2dcSDavid Gibson /* /hypervisor */ 1222fca5f2dcSDavid Gibson if (kvm_enabled()) { 1223fca5f2dcSDavid Gibson spapr_dt_hypervisor(spapr, fdt); 1224fca5f2dcSDavid Gibson } 1225fca5f2dcSDavid Gibson 1226cf6e5223SDavid Gibson /* Build memory reserve map */ 1227a49f62b9SAlexey Kardashevskiy if (reset) { 1228cf6e5223SDavid Gibson if (spapr->kernel_size) { 122987262806SAlexey Kardashevskiy _FDT((fdt_add_mem_rsv(fdt, spapr->kernel_addr, 123087262806SAlexey Kardashevskiy spapr->kernel_size))); 1231cf6e5223SDavid Gibson } 1232cf6e5223SDavid Gibson if (spapr->initrd_size) { 1233a49f62b9SAlexey Kardashevskiy _FDT((fdt_add_mem_rsv(fdt, spapr->initrd_base, 1234a49f62b9SAlexey Kardashevskiy spapr->initrd_size))); 1235a49f62b9SAlexey Kardashevskiy } 1236cf6e5223SDavid Gibson } 1237cf6e5223SDavid Gibson 1238ee3a71e3SShivaprasad G Bhat /* NVDIMM devices */ 1239ee3a71e3SShivaprasad G Bhat if (mc->nvdimm_supported) { 1240f1aa45ffSDaniel Henrique Barboza spapr_dt_persistent_memory(spapr, fdt); 1241ee3a71e3SShivaprasad G Bhat } 1242ee3a71e3SShivaprasad G Bhat 1243997b6cfcSDavid Gibson return fdt; 124453018216SPaolo Bonzini } 124553018216SPaolo Bonzini 124653018216SPaolo Bonzini static uint64_t translate_kernel_address(void *opaque, uint64_t addr) 124753018216SPaolo Bonzini { 124887262806SAlexey Kardashevskiy SpaprMachineState *spapr = opaque; 124987262806SAlexey Kardashevskiy 125087262806SAlexey Kardashevskiy return (addr & 0x0fffffff) + spapr->kernel_addr; 125153018216SPaolo Bonzini } 125253018216SPaolo Bonzini 12531d1be34dSDavid Gibson static void emulate_spapr_hypercall(PPCVirtualHypervisor *vhyp, 12541d1be34dSDavid Gibson PowerPCCPU *cpu) 125553018216SPaolo Bonzini { 125653018216SPaolo Bonzini CPUPPCState *env = &cpu->env; 125753018216SPaolo Bonzini 12588d04fb55SJan Kiszka /* The TCG path should also be holding the BQL at this point */ 12598d04fb55SJan Kiszka g_assert(qemu_mutex_iothread_locked()); 12608d04fb55SJan Kiszka 126153018216SPaolo Bonzini if (msr_pr) { 126253018216SPaolo Bonzini hcall_dprintf("Hypercall made with MSR[PR]=1\n"); 126353018216SPaolo Bonzini env->gpr[3] = H_PRIVILEGE; 126453018216SPaolo Bonzini } else { 126553018216SPaolo Bonzini env->gpr[3] = spapr_hypercall(cpu, env->gpr[3], &env->gpr[4]); 126653018216SPaolo Bonzini } 126753018216SPaolo Bonzini } 126853018216SPaolo Bonzini 126900fd075eSBenjamin Herrenschmidt struct LPCRSyncState { 127000fd075eSBenjamin Herrenschmidt target_ulong value; 127100fd075eSBenjamin Herrenschmidt target_ulong mask; 127200fd075eSBenjamin Herrenschmidt }; 127300fd075eSBenjamin Herrenschmidt 127400fd075eSBenjamin Herrenschmidt static void do_lpcr_sync(CPUState *cs, run_on_cpu_data arg) 127500fd075eSBenjamin Herrenschmidt { 127600fd075eSBenjamin Herrenschmidt struct LPCRSyncState *s = arg.host_ptr; 127700fd075eSBenjamin Herrenschmidt PowerPCCPU *cpu = POWERPC_CPU(cs); 127800fd075eSBenjamin Herrenschmidt CPUPPCState *env = &cpu->env; 127900fd075eSBenjamin Herrenschmidt target_ulong lpcr; 128000fd075eSBenjamin Herrenschmidt 128100fd075eSBenjamin Herrenschmidt cpu_synchronize_state(cs); 128200fd075eSBenjamin Herrenschmidt lpcr = env->spr[SPR_LPCR]; 128300fd075eSBenjamin Herrenschmidt lpcr &= ~s->mask; 128400fd075eSBenjamin Herrenschmidt lpcr |= s->value; 128500fd075eSBenjamin Herrenschmidt ppc_store_lpcr(cpu, lpcr); 128600fd075eSBenjamin Herrenschmidt } 128700fd075eSBenjamin Herrenschmidt 128800fd075eSBenjamin Herrenschmidt void spapr_set_all_lpcrs(target_ulong value, target_ulong mask) 128900fd075eSBenjamin Herrenschmidt { 129000fd075eSBenjamin Herrenschmidt CPUState *cs; 129100fd075eSBenjamin Herrenschmidt struct LPCRSyncState s = { 129200fd075eSBenjamin Herrenschmidt .value = value, 129300fd075eSBenjamin Herrenschmidt .mask = mask 129400fd075eSBenjamin Herrenschmidt }; 129500fd075eSBenjamin Herrenschmidt CPU_FOREACH(cs) { 129600fd075eSBenjamin Herrenschmidt run_on_cpu(cs, do_lpcr_sync, RUN_ON_CPU_HOST_PTR(&s)); 129700fd075eSBenjamin Herrenschmidt } 129800fd075eSBenjamin Herrenschmidt } 129900fd075eSBenjamin Herrenschmidt 130079825f4dSBenjamin Herrenschmidt static void spapr_get_pate(PPCVirtualHypervisor *vhyp, ppc_v3_pate_t *entry) 13019861bb3eSSuraj Jitindar Singh { 1302ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(vhyp); 13039861bb3eSSuraj Jitindar Singh 130479825f4dSBenjamin Herrenschmidt /* Copy PATE1:GR into PATE0:HR */ 130579825f4dSBenjamin Herrenschmidt entry->dw0 = spapr->patb_entry & PATE0_HR; 130679825f4dSBenjamin Herrenschmidt entry->dw1 = spapr->patb_entry; 13079861bb3eSSuraj Jitindar Singh } 13089861bb3eSSuraj Jitindar Singh 1309e6b8fd24SSamuel Mendoza-Jonas #define HPTE(_table, _i) (void *)(((uint64_t *)(_table)) + ((_i) * 2)) 1310e6b8fd24SSamuel Mendoza-Jonas #define HPTE_VALID(_hpte) (tswap64(*((uint64_t *)(_hpte))) & HPTE64_V_VALID) 1311e6b8fd24SSamuel Mendoza-Jonas #define HPTE_DIRTY(_hpte) (tswap64(*((uint64_t *)(_hpte))) & HPTE64_V_HPTE_DIRTY) 1312e6b8fd24SSamuel Mendoza-Jonas #define CLEAN_HPTE(_hpte) ((*(uint64_t *)(_hpte)) &= tswap64(~HPTE64_V_HPTE_DIRTY)) 1313e6b8fd24SSamuel Mendoza-Jonas #define DIRTY_HPTE(_hpte) ((*(uint64_t *)(_hpte)) |= tswap64(HPTE64_V_HPTE_DIRTY)) 1314e6b8fd24SSamuel Mendoza-Jonas 1315715c5407SDavid Gibson /* 1316715c5407SDavid Gibson * Get the fd to access the kernel htab, re-opening it if necessary 1317715c5407SDavid Gibson */ 1318ce2918cbSDavid Gibson static int get_htab_fd(SpaprMachineState *spapr) 1319715c5407SDavid Gibson { 132014b0d748SGreg Kurz Error *local_err = NULL; 132114b0d748SGreg Kurz 1322715c5407SDavid Gibson if (spapr->htab_fd >= 0) { 1323715c5407SDavid Gibson return spapr->htab_fd; 1324715c5407SDavid Gibson } 1325715c5407SDavid Gibson 132614b0d748SGreg Kurz spapr->htab_fd = kvmppc_get_htab_fd(false, 0, &local_err); 1327715c5407SDavid Gibson if (spapr->htab_fd < 0) { 132814b0d748SGreg Kurz error_report_err(local_err); 1329715c5407SDavid Gibson } 1330715c5407SDavid Gibson 1331715c5407SDavid Gibson return spapr->htab_fd; 1332715c5407SDavid Gibson } 1333715c5407SDavid Gibson 1334ce2918cbSDavid Gibson void close_htab_fd(SpaprMachineState *spapr) 1335715c5407SDavid Gibson { 1336715c5407SDavid Gibson if (spapr->htab_fd >= 0) { 1337715c5407SDavid Gibson close(spapr->htab_fd); 1338715c5407SDavid Gibson } 1339715c5407SDavid Gibson spapr->htab_fd = -1; 1340715c5407SDavid Gibson } 1341715c5407SDavid Gibson 1342e57ca75cSDavid Gibson static hwaddr spapr_hpt_mask(PPCVirtualHypervisor *vhyp) 1343e57ca75cSDavid Gibson { 1344ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(vhyp); 1345e57ca75cSDavid Gibson 1346e57ca75cSDavid Gibson return HTAB_SIZE(spapr) / HASH_PTEG_SIZE_64 - 1; 1347e57ca75cSDavid Gibson } 1348e57ca75cSDavid Gibson 13491ec26c75SGreg Kurz static target_ulong spapr_encode_hpt_for_kvm_pr(PPCVirtualHypervisor *vhyp) 13501ec26c75SGreg Kurz { 1351ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(vhyp); 13521ec26c75SGreg Kurz 13531ec26c75SGreg Kurz assert(kvm_enabled()); 13541ec26c75SGreg Kurz 13551ec26c75SGreg Kurz if (!spapr->htab) { 13561ec26c75SGreg Kurz return 0; 13571ec26c75SGreg Kurz } 13581ec26c75SGreg Kurz 13591ec26c75SGreg Kurz return (target_ulong)(uintptr_t)spapr->htab | (spapr->htab_shift - 18); 13601ec26c75SGreg Kurz } 13611ec26c75SGreg Kurz 1362e57ca75cSDavid Gibson static const ppc_hash_pte64_t *spapr_map_hptes(PPCVirtualHypervisor *vhyp, 1363e57ca75cSDavid Gibson hwaddr ptex, int n) 1364e57ca75cSDavid Gibson { 1365ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(vhyp); 1366e57ca75cSDavid Gibson hwaddr pte_offset = ptex * HASH_PTE_SIZE_64; 1367e57ca75cSDavid Gibson 1368e57ca75cSDavid Gibson if (!spapr->htab) { 1369e57ca75cSDavid Gibson /* 1370e57ca75cSDavid Gibson * HTAB is controlled by KVM. Fetch into temporary buffer 1371e57ca75cSDavid Gibson */ 1372e57ca75cSDavid Gibson ppc_hash_pte64_t *hptes = g_malloc(n * HASH_PTE_SIZE_64); 1373e57ca75cSDavid Gibson kvmppc_read_hptes(hptes, ptex, n); 1374e57ca75cSDavid Gibson return hptes; 1375e57ca75cSDavid Gibson } 1376e57ca75cSDavid Gibson 1377e57ca75cSDavid Gibson /* 1378e57ca75cSDavid Gibson * HTAB is controlled by QEMU. Just point to the internally 1379e57ca75cSDavid Gibson * accessible PTEG. 1380e57ca75cSDavid Gibson */ 1381e57ca75cSDavid Gibson return (const ppc_hash_pte64_t *)(spapr->htab + pte_offset); 1382e57ca75cSDavid Gibson } 1383e57ca75cSDavid Gibson 1384e57ca75cSDavid Gibson static void spapr_unmap_hptes(PPCVirtualHypervisor *vhyp, 1385e57ca75cSDavid Gibson const ppc_hash_pte64_t *hptes, 1386e57ca75cSDavid Gibson hwaddr ptex, int n) 1387e57ca75cSDavid Gibson { 1388ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(vhyp); 1389e57ca75cSDavid Gibson 1390e57ca75cSDavid Gibson if (!spapr->htab) { 1391e57ca75cSDavid Gibson g_free((void *)hptes); 1392e57ca75cSDavid Gibson } 1393e57ca75cSDavid Gibson 1394e57ca75cSDavid Gibson /* Nothing to do for qemu managed HPT */ 1395e57ca75cSDavid Gibson } 1396e57ca75cSDavid Gibson 1397a2dd4e83SBenjamin Herrenschmidt void spapr_store_hpte(PowerPCCPU *cpu, hwaddr ptex, 1398e57ca75cSDavid Gibson uint64_t pte0, uint64_t pte1) 1399e57ca75cSDavid Gibson { 1400a2dd4e83SBenjamin Herrenschmidt SpaprMachineState *spapr = SPAPR_MACHINE(cpu->vhyp); 1401e57ca75cSDavid Gibson hwaddr offset = ptex * HASH_PTE_SIZE_64; 1402e57ca75cSDavid Gibson 1403e57ca75cSDavid Gibson if (!spapr->htab) { 1404e57ca75cSDavid Gibson kvmppc_write_hpte(ptex, pte0, pte1); 1405e57ca75cSDavid Gibson } else { 14063054b0caSBenjamin Herrenschmidt if (pte0 & HPTE64_V_VALID) { 1407e57ca75cSDavid Gibson stq_p(spapr->htab + offset + HASH_PTE_SIZE_64 / 2, pte1); 14083054b0caSBenjamin Herrenschmidt /* 14093054b0caSBenjamin Herrenschmidt * When setting valid, we write PTE1 first. This ensures 14103054b0caSBenjamin Herrenschmidt * proper synchronization with the reading code in 14113054b0caSBenjamin Herrenschmidt * ppc_hash64_pteg_search() 14123054b0caSBenjamin Herrenschmidt */ 14133054b0caSBenjamin Herrenschmidt smp_wmb(); 14143054b0caSBenjamin Herrenschmidt stq_p(spapr->htab + offset, pte0); 14153054b0caSBenjamin Herrenschmidt } else { 14163054b0caSBenjamin Herrenschmidt stq_p(spapr->htab + offset, pte0); 14173054b0caSBenjamin Herrenschmidt /* 14183054b0caSBenjamin Herrenschmidt * When clearing it we set PTE0 first. This ensures proper 14193054b0caSBenjamin Herrenschmidt * synchronization with the reading code in 14203054b0caSBenjamin Herrenschmidt * ppc_hash64_pteg_search() 14213054b0caSBenjamin Herrenschmidt */ 14223054b0caSBenjamin Herrenschmidt smp_wmb(); 14233054b0caSBenjamin Herrenschmidt stq_p(spapr->htab + offset + HASH_PTE_SIZE_64 / 2, pte1); 14243054b0caSBenjamin Herrenschmidt } 1425e57ca75cSDavid Gibson } 1426e57ca75cSDavid Gibson } 1427e57ca75cSDavid Gibson 1428a2dd4e83SBenjamin Herrenschmidt static void spapr_hpte_set_c(PPCVirtualHypervisor *vhyp, hwaddr ptex, 1429a2dd4e83SBenjamin Herrenschmidt uint64_t pte1) 1430a2dd4e83SBenjamin Herrenschmidt { 1431a2dd4e83SBenjamin Herrenschmidt hwaddr offset = ptex * HASH_PTE_SIZE_64 + 15; 1432a2dd4e83SBenjamin Herrenschmidt SpaprMachineState *spapr = SPAPR_MACHINE(vhyp); 1433a2dd4e83SBenjamin Herrenschmidt 1434a2dd4e83SBenjamin Herrenschmidt if (!spapr->htab) { 1435a2dd4e83SBenjamin Herrenschmidt /* There should always be a hash table when this is called */ 1436a2dd4e83SBenjamin Herrenschmidt error_report("spapr_hpte_set_c called with no hash table !"); 1437a2dd4e83SBenjamin Herrenschmidt return; 1438a2dd4e83SBenjamin Herrenschmidt } 1439a2dd4e83SBenjamin Herrenschmidt 1440a2dd4e83SBenjamin Herrenschmidt /* The HW performs a non-atomic byte update */ 1441a2dd4e83SBenjamin Herrenschmidt stb_p(spapr->htab + offset, (pte1 & 0xff) | 0x80); 1442a2dd4e83SBenjamin Herrenschmidt } 1443a2dd4e83SBenjamin Herrenschmidt 1444a2dd4e83SBenjamin Herrenschmidt static void spapr_hpte_set_r(PPCVirtualHypervisor *vhyp, hwaddr ptex, 1445a2dd4e83SBenjamin Herrenschmidt uint64_t pte1) 1446a2dd4e83SBenjamin Herrenschmidt { 1447a2dd4e83SBenjamin Herrenschmidt hwaddr offset = ptex * HASH_PTE_SIZE_64 + 14; 1448a2dd4e83SBenjamin Herrenschmidt SpaprMachineState *spapr = SPAPR_MACHINE(vhyp); 1449a2dd4e83SBenjamin Herrenschmidt 1450a2dd4e83SBenjamin Herrenschmidt if (!spapr->htab) { 1451a2dd4e83SBenjamin Herrenschmidt /* There should always be a hash table when this is called */ 1452a2dd4e83SBenjamin Herrenschmidt error_report("spapr_hpte_set_r called with no hash table !"); 1453a2dd4e83SBenjamin Herrenschmidt return; 1454a2dd4e83SBenjamin Herrenschmidt } 1455a2dd4e83SBenjamin Herrenschmidt 1456a2dd4e83SBenjamin Herrenschmidt /* The HW performs a non-atomic byte update */ 1457a2dd4e83SBenjamin Herrenschmidt stb_p(spapr->htab + offset, ((pte1 >> 8) & 0xff) | 0x01); 1458a2dd4e83SBenjamin Herrenschmidt } 1459a2dd4e83SBenjamin Herrenschmidt 14600b0b8310SDavid Gibson int spapr_hpt_shift_for_ramsize(uint64_t ramsize) 14618dfe8e7fSDavid Gibson { 14628dfe8e7fSDavid Gibson int shift; 14638dfe8e7fSDavid Gibson 14648dfe8e7fSDavid Gibson /* We aim for a hash table of size 1/128 the size of RAM (rounded 14658dfe8e7fSDavid Gibson * up). The PAPR recommendation is actually 1/64 of RAM size, but 14668dfe8e7fSDavid Gibson * that's much more than is needed for Linux guests */ 14678dfe8e7fSDavid Gibson shift = ctz64(pow2ceil(ramsize)) - 7; 14688dfe8e7fSDavid Gibson shift = MAX(shift, 18); /* Minimum architected size */ 14698dfe8e7fSDavid Gibson shift = MIN(shift, 46); /* Maximum architected size */ 14708dfe8e7fSDavid Gibson return shift; 14718dfe8e7fSDavid Gibson } 14728dfe8e7fSDavid Gibson 1473ce2918cbSDavid Gibson void spapr_free_hpt(SpaprMachineState *spapr) 147406ec79e8SBharata B Rao { 147506ec79e8SBharata B Rao g_free(spapr->htab); 147606ec79e8SBharata B Rao spapr->htab = NULL; 147706ec79e8SBharata B Rao spapr->htab_shift = 0; 147806ec79e8SBharata B Rao close_htab_fd(spapr); 147906ec79e8SBharata B Rao } 148006ec79e8SBharata B Rao 1481a4e3a7c0SGreg Kurz int spapr_reallocate_hpt(SpaprMachineState *spapr, int shift, Error **errp) 148253018216SPaolo Bonzini { 1483c3e051edSGreg Kurz ERRP_GUARD(); 1484c5f54f3eSDavid Gibson long rc; 148553018216SPaolo Bonzini 1486c5f54f3eSDavid Gibson /* Clean up any HPT info from a previous boot */ 148706ec79e8SBharata B Rao spapr_free_hpt(spapr); 148853018216SPaolo Bonzini 1489c5f54f3eSDavid Gibson rc = kvmppc_reset_htab(shift); 1490f0638a0bSFabiano Rosas 1491f0638a0bSFabiano Rosas if (rc == -EOPNOTSUPP) { 1492f0638a0bSFabiano Rosas error_setg(errp, "HPT not supported in nested guests"); 1493a4e3a7c0SGreg Kurz return -EOPNOTSUPP; 1494f0638a0bSFabiano Rosas } 1495f0638a0bSFabiano Rosas 1496c5f54f3eSDavid Gibson if (rc < 0) { 1497c5f54f3eSDavid Gibson /* kernel-side HPT needed, but couldn't allocate one */ 1498c3e051edSGreg Kurz error_setg_errno(errp, errno, "Failed to allocate KVM HPT of order %d", 1499c5f54f3eSDavid Gibson shift); 1500c3e051edSGreg Kurz error_append_hint(errp, "Try smaller maxmem?\n"); 1501a4e3a7c0SGreg Kurz return -errno; 1502c5f54f3eSDavid Gibson } else if (rc > 0) { 1503c5f54f3eSDavid Gibson /* kernel-side HPT allocated */ 1504c5f54f3eSDavid Gibson if (rc != shift) { 1505c5f54f3eSDavid Gibson error_setg(errp, 1506c3e051edSGreg Kurz "Requested order %d HPT, but kernel allocated order %ld", 1507c5f54f3eSDavid Gibson shift, rc); 1508c3e051edSGreg Kurz error_append_hint(errp, "Try smaller maxmem?\n"); 1509a4e3a7c0SGreg Kurz return -ENOSPC; 15107735fedaSBharata B Rao } 15117735fedaSBharata B Rao 151253018216SPaolo Bonzini spapr->htab_shift = shift; 1513c18ad9a5SDavid Gibson spapr->htab = NULL; 1514b817772aSBharata B Rao } else { 1515c5f54f3eSDavid Gibson /* kernel-side HPT not needed, allocate in userspace instead */ 1516c5f54f3eSDavid Gibson size_t size = 1ULL << shift; 1517c5f54f3eSDavid Gibson int i; 151801a57972SSamuel Mendoza-Jonas 1519c5f54f3eSDavid Gibson spapr->htab = qemu_memalign(size, size); 1520c5f54f3eSDavid Gibson memset(spapr->htab, 0, size); 1521c5f54f3eSDavid Gibson spapr->htab_shift = shift; 1522b817772aSBharata B Rao 1523c5f54f3eSDavid Gibson for (i = 0; i < size / HASH_PTE_SIZE_64; i++) { 1524c5f54f3eSDavid Gibson DIRTY_HPTE(HPTE(spapr->htab, i)); 15257735fedaSBharata B Rao } 152653018216SPaolo Bonzini } 1527ee4d9eccSSuraj Jitindar Singh /* We're setting up a hash table, so that means we're not radix */ 1528176dcceeSSuraj Jitindar Singh spapr->patb_entry = 0; 152900fd075eSBenjamin Herrenschmidt spapr_set_all_lpcrs(0, LPCR_HR | LPCR_UPRT); 1530a4e3a7c0SGreg Kurz return 0; 153153018216SPaolo Bonzini } 153253018216SPaolo Bonzini 15338897ea5aSDavid Gibson void spapr_setup_hpt(SpaprMachineState *spapr) 1534b4db5413SSuraj Jitindar Singh { 15352772cf6bSDavid Gibson int hpt_shift; 15362772cf6bSDavid Gibson 1537087820e3SGreg Kurz if (spapr->resize_hpt == SPAPR_RESIZE_HPT_DISABLED) { 15382772cf6bSDavid Gibson hpt_shift = spapr_hpt_shift_for_ramsize(MACHINE(spapr)->maxram_size); 15392772cf6bSDavid Gibson } else { 1540768a20f3SDavid Gibson uint64_t current_ram_size; 1541768a20f3SDavid Gibson 1542768a20f3SDavid Gibson current_ram_size = MACHINE(spapr)->ram_size + get_plugged_memory_size(); 1543768a20f3SDavid Gibson hpt_shift = spapr_hpt_shift_for_ramsize(current_ram_size); 15442772cf6bSDavid Gibson } 15452772cf6bSDavid Gibson spapr_reallocate_hpt(spapr, hpt_shift, &error_fatal); 15462772cf6bSDavid Gibson 15478897ea5aSDavid Gibson if (kvm_enabled()) { 15486a84737cSDavid Gibson hwaddr vrma_limit = kvmppc_vrma_limit(spapr->htab_shift); 15496a84737cSDavid Gibson 15508897ea5aSDavid Gibson /* Check our RMA fits in the possible VRMA */ 15518897ea5aSDavid Gibson if (vrma_limit < spapr->rma_size) { 15528897ea5aSDavid Gibson error_report("Unable to create %" HWADDR_PRIu 15538897ea5aSDavid Gibson "MiB RMA (VRMA only allows %" HWADDR_PRIu "MiB", 15548897ea5aSDavid Gibson spapr->rma_size / MiB, vrma_limit / MiB); 15558897ea5aSDavid Gibson exit(EXIT_FAILURE); 15568897ea5aSDavid Gibson } 1557b4db5413SSuraj Jitindar Singh } 1558b4db5413SSuraj Jitindar Singh } 1559b4db5413SSuraj Jitindar Singh 1560a0628599SLike Xu static void spapr_machine_reset(MachineState *machine) 156153018216SPaolo Bonzini { 1562ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(machine); 1563182735efSAndreas Färber PowerPCCPU *first_ppc_cpu; 1564744a928cSAlexey Kardashevskiy hwaddr fdt_addr; 1565997b6cfcSDavid Gibson void *fdt; 1566997b6cfcSDavid Gibson int rc; 1567259186a7SAndreas Färber 15686c8ebe30SDavid Gibson pef_kvm_reset(machine->cgs, &error_fatal); 15699f6edd06SDavid Gibson spapr_caps_apply(spapr); 157033face6bSDavid Gibson 15711481fe5fSLaurent Vivier first_ppc_cpu = POWERPC_CPU(first_cpu); 15721481fe5fSLaurent Vivier if (kvm_enabled() && kvmppc_has_cap_mmu_radix() && 1573ad99d04cSDavid Gibson ppc_type_check_compat(machine->cpu_type, CPU_POWERPC_LOGICAL_3_00, 0, 15741481fe5fSLaurent Vivier spapr->max_compat_pvr)) { 157579825f4dSBenjamin Herrenschmidt /* 157679825f4dSBenjamin Herrenschmidt * If using KVM with radix mode available, VCPUs can be started 1577b4db5413SSuraj Jitindar Singh * without a HPT because KVM will start them in radix mode. 157879825f4dSBenjamin Herrenschmidt * Set the GR bit in PATE so that we know there is no HPT. 157979825f4dSBenjamin Herrenschmidt */ 158079825f4dSBenjamin Herrenschmidt spapr->patb_entry = PATE1_GR; 158100fd075eSBenjamin Herrenschmidt spapr_set_all_lpcrs(LPCR_HR | LPCR_UPRT, LPCR_HR | LPCR_UPRT); 1582b4db5413SSuraj Jitindar Singh } else { 15838897ea5aSDavid Gibson spapr_setup_hpt(spapr); 1584c5f54f3eSDavid Gibson } 158553018216SPaolo Bonzini 158625c9780dSDavid Gibson qemu_devices_reset(); 158725c9780dSDavid Gibson 15889012a53fSGreg Kurz spapr_ovec_cleanup(spapr->ov5_cas); 15899012a53fSGreg Kurz spapr->ov5_cas = spapr_ovec_new(); 15909012a53fSGreg Kurz 1591ce03a193SLaurent Vivier ppc_set_compat_all(spapr->max_compat_pvr, &error_fatal); 15929012a53fSGreg Kurz 1593ec132efaSAlexey Kardashevskiy /* 1594b2e22477SCédric Le Goater * This is fixing some of the default configuration of the XIVE 1595b2e22477SCédric Le Goater * devices. To be called after the reset of the machine devices. 1596b2e22477SCédric Le Goater */ 1597b2e22477SCédric Le Goater spapr_irq_reset(spapr, &error_fatal); 1598b2e22477SCédric Le Goater 159923ff81bdSGreg Kurz /* 160023ff81bdSGreg Kurz * There is no CAS under qtest. Simulate one to please the code that 160123ff81bdSGreg Kurz * depends on spapr->ov5_cas. This is especially needed to test device 160223ff81bdSGreg Kurz * unplug, so we do that before resetting the DRCs. 160323ff81bdSGreg Kurz */ 160423ff81bdSGreg Kurz if (qtest_enabled()) { 160523ff81bdSGreg Kurz spapr_ovec_cleanup(spapr->ov5_cas); 160623ff81bdSGreg Kurz spapr->ov5_cas = spapr_ovec_clone(spapr->ov5); 160723ff81bdSGreg Kurz } 160823ff81bdSGreg Kurz 160982512483SGreg Kurz /* DRC reset may cause a device to be unplugged. This will cause troubles 161082512483SGreg Kurz * if this device is used by another device (eg, a running vhost backend 161182512483SGreg Kurz * will crash QEMU if the DIMM holding the vring goes away). To avoid such 161282512483SGreg Kurz * situations, we reset DRCs after all devices have been reset. 161382512483SGreg Kurz */ 161411055041SGreg Kurz spapr_drc_reset_all(spapr); 161582512483SGreg Kurz 161656258174SDaniel Henrique Barboza spapr_clear_pending_events(spapr); 161753018216SPaolo Bonzini 1618b7d1f77aSBenjamin Herrenschmidt /* 1619b7d1f77aSBenjamin Herrenschmidt * We place the device tree and RTAS just below either the top of the RMA, 1620df269271SAlexey Kardashevskiy * or just below 2GB, whichever is lower, so that it can be 1621b7d1f77aSBenjamin Herrenschmidt * processed with 32-bit real mode code if necessary 1622b7d1f77aSBenjamin Herrenschmidt */ 1623744a928cSAlexey Kardashevskiy fdt_addr = MIN(spapr->rma_size, RTAS_MAX_ADDR) - FDT_MAX_SIZE; 1624b7d1f77aSBenjamin Herrenschmidt 162597b32a6aSDavid Gibson fdt = spapr_build_fdt(spapr, true, FDT_MAX_SIZE); 162653018216SPaolo Bonzini 1627997b6cfcSDavid Gibson rc = fdt_pack(fdt); 1628997b6cfcSDavid Gibson 1629997b6cfcSDavid Gibson /* Should only fail if we've built a corrupted tree */ 1630997b6cfcSDavid Gibson assert(rc == 0); 1631997b6cfcSDavid Gibson 1632997b6cfcSDavid Gibson /* Load the fdt */ 1633997b6cfcSDavid Gibson qemu_fdt_dumpdtb(fdt, fdt_totalsize(fdt)); 1634cae172abSDavid Gibson cpu_physical_memory_write(fdt_addr, fdt, fdt_totalsize(fdt)); 1635fea35ca4SAlexey Kardashevskiy g_free(spapr->fdt_blob); 1636fea35ca4SAlexey Kardashevskiy spapr->fdt_size = fdt_totalsize(fdt); 1637fea35ca4SAlexey Kardashevskiy spapr->fdt_initial_size = spapr->fdt_size; 1638fea35ca4SAlexey Kardashevskiy spapr->fdt_blob = fdt; 1639997b6cfcSDavid Gibson 164053018216SPaolo Bonzini /* Set up the entry state */ 1641395a20d3SAlexey Kardashevskiy spapr_cpu_set_entry_state(first_ppc_cpu, SPAPR_ENTRY_POINT, 0, fdt_addr, 0); 1642182735efSAndreas Färber first_ppc_cpu->env.gpr[5] = 0; 164353018216SPaolo Bonzini 1644edfdbf9cSNicholas Piggin spapr->fwnmi_system_reset_addr = -1; 16458af7e1feSNicholas Piggin spapr->fwnmi_machine_check_addr = -1; 16468af7e1feSNicholas Piggin spapr->fwnmi_machine_check_interlock = -1; 16479ac703acSAravinda Prasad 16489ac703acSAravinda Prasad /* Signal all vCPUs waiting on this condition */ 16498af7e1feSNicholas Piggin qemu_cond_broadcast(&spapr->fwnmi_machine_check_interlock_cond); 16502500fb42SAravinda Prasad 16512500fb42SAravinda Prasad migrate_del_blocker(spapr->fwnmi_migration_blocker); 165253018216SPaolo Bonzini } 165353018216SPaolo Bonzini 1654ce2918cbSDavid Gibson static void spapr_create_nvram(SpaprMachineState *spapr) 165553018216SPaolo Bonzini { 16563e80f690SMarkus Armbruster DeviceState *dev = qdev_new("spapr-nvram"); 16573978b863SPaolo Bonzini DriveInfo *dinfo = drive_get(IF_PFLASH, 0, 0); 165853018216SPaolo Bonzini 16593978b863SPaolo Bonzini if (dinfo) { 1660934df912SMarkus Armbruster qdev_prop_set_drive_err(dev, "drive", blk_by_legacy_dinfo(dinfo), 16616231a6daSMarkus Armbruster &error_fatal); 166253018216SPaolo Bonzini } 166353018216SPaolo Bonzini 16643e80f690SMarkus Armbruster qdev_realize_and_unref(dev, &spapr->vio_bus->bus, &error_fatal); 166553018216SPaolo Bonzini 1666ce2918cbSDavid Gibson spapr->nvram = (struct SpaprNvram *)dev; 166753018216SPaolo Bonzini } 166853018216SPaolo Bonzini 1669ce2918cbSDavid Gibson static void spapr_rtc_create(SpaprMachineState *spapr) 167028df36a1SDavid Gibson { 16719fc7fc4dSMarkus Armbruster object_initialize_child_with_props(OBJECT(spapr), "rtc", &spapr->rtc, 16729fc7fc4dSMarkus Armbruster sizeof(spapr->rtc), TYPE_SPAPR_RTC, 1673f6d4dca8SThomas Huth &error_fatal, NULL); 1674ce189ab2SMarkus Armbruster qdev_realize(DEVICE(&spapr->rtc), NULL, &error_fatal); 1675147ff807SCédric Le Goater object_property_add_alias(OBJECT(spapr), "rtc-time", OBJECT(&spapr->rtc), 1676d2623129SMarkus Armbruster "date"); 167728df36a1SDavid Gibson } 167828df36a1SDavid Gibson 167953018216SPaolo Bonzini /* Returns whether we want to use VGA or not */ 168014c6a894SDavid Gibson static bool spapr_vga_init(PCIBus *pci_bus, Error **errp) 168153018216SPaolo Bonzini { 168253018216SPaolo Bonzini switch (vga_interface_type) { 168353018216SPaolo Bonzini case VGA_NONE: 16847effdaa3SMark Wu return false; 16857effdaa3SMark Wu case VGA_DEVICE: 16867effdaa3SMark Wu return true; 168753018216SPaolo Bonzini case VGA_STD: 1688b798c190SBenjamin Herrenschmidt case VGA_VIRTIO: 16896e66d0c6SThomas Huth case VGA_CIRRUS: 169053018216SPaolo Bonzini return pci_vga_init(pci_bus) != NULL; 169153018216SPaolo Bonzini default: 169214c6a894SDavid Gibson error_setg(errp, 169314c6a894SDavid Gibson "Unsupported VGA mode, only -vga std or -vga virtio is supported"); 169414c6a894SDavid Gibson return false; 169553018216SPaolo Bonzini } 169653018216SPaolo Bonzini } 169753018216SPaolo Bonzini 16984e5fe368SSuraj Jitindar Singh static int spapr_pre_load(void *opaque) 16994e5fe368SSuraj Jitindar Singh { 17004e5fe368SSuraj Jitindar Singh int rc; 17014e5fe368SSuraj Jitindar Singh 17024e5fe368SSuraj Jitindar Singh rc = spapr_caps_pre_load(opaque); 17034e5fe368SSuraj Jitindar Singh if (rc) { 17044e5fe368SSuraj Jitindar Singh return rc; 17054e5fe368SSuraj Jitindar Singh } 17064e5fe368SSuraj Jitindar Singh 17074e5fe368SSuraj Jitindar Singh return 0; 17084e5fe368SSuraj Jitindar Singh } 17094e5fe368SSuraj Jitindar Singh 1710880ae7deSDavid Gibson static int spapr_post_load(void *opaque, int version_id) 1711880ae7deSDavid Gibson { 1712ce2918cbSDavid Gibson SpaprMachineState *spapr = (SpaprMachineState *)opaque; 1713880ae7deSDavid Gibson int err = 0; 1714880ae7deSDavid Gibson 1715be85537dSDavid Gibson err = spapr_caps_post_migration(spapr); 1716be85537dSDavid Gibson if (err) { 1717be85537dSDavid Gibson return err; 1718be85537dSDavid Gibson } 1719be85537dSDavid Gibson 1720e502202cSCédric Le Goater /* 1721e502202cSCédric Le Goater * In earlier versions, there was no separate qdev for the PAPR 1722880ae7deSDavid Gibson * RTC, so the RTC offset was stored directly in sPAPREnvironment. 1723880ae7deSDavid Gibson * So when migrating from those versions, poke the incoming offset 1724e502202cSCédric Le Goater * value into the RTC device 1725e502202cSCédric Le Goater */ 1726880ae7deSDavid Gibson if (version_id < 3) { 1727147ff807SCédric Le Goater err = spapr_rtc_import_offset(&spapr->rtc, spapr->rtc_offset); 1728e502202cSCédric Le Goater if (err) { 1729e502202cSCédric Le Goater return err; 1730e502202cSCédric Le Goater } 1731880ae7deSDavid Gibson } 1732880ae7deSDavid Gibson 17330c86b2dfSLaurent Vivier if (kvm_enabled() && spapr->patb_entry) { 1734d39c90f5SBharata B Rao PowerPCCPU *cpu = POWERPC_CPU(first_cpu); 173579825f4dSBenjamin Herrenschmidt bool radix = !!(spapr->patb_entry & PATE1_GR); 1736d39c90f5SBharata B Rao bool gtse = !!(cpu->env.spr[SPR_LPCR] & LPCR_GTSE); 1737d39c90f5SBharata B Rao 173800fd075eSBenjamin Herrenschmidt /* 173900fd075eSBenjamin Herrenschmidt * Update LPCR:HR and UPRT as they may not be set properly in 174000fd075eSBenjamin Herrenschmidt * the stream 174100fd075eSBenjamin Herrenschmidt */ 174200fd075eSBenjamin Herrenschmidt spapr_set_all_lpcrs(radix ? (LPCR_HR | LPCR_UPRT) : 0, 174300fd075eSBenjamin Herrenschmidt LPCR_HR | LPCR_UPRT); 174400fd075eSBenjamin Herrenschmidt 1745d39c90f5SBharata B Rao err = kvmppc_configure_v3_mmu(cpu, radix, gtse, spapr->patb_entry); 1746d39c90f5SBharata B Rao if (err) { 1747d39c90f5SBharata B Rao error_report("Process table config unsupported by the host"); 1748d39c90f5SBharata B Rao return -EINVAL; 1749d39c90f5SBharata B Rao } 1750d39c90f5SBharata B Rao } 1751d39c90f5SBharata B Rao 17521c53b06cSCédric Le Goater err = spapr_irq_post_load(spapr, version_id); 17531c53b06cSCédric Le Goater if (err) { 17541c53b06cSCédric Le Goater return err; 17551c53b06cSCédric Le Goater } 17561c53b06cSCédric Le Goater 1757880ae7deSDavid Gibson return err; 1758880ae7deSDavid Gibson } 1759880ae7deSDavid Gibson 17604e5fe368SSuraj Jitindar Singh static int spapr_pre_save(void *opaque) 17614e5fe368SSuraj Jitindar Singh { 17624e5fe368SSuraj Jitindar Singh int rc; 17634e5fe368SSuraj Jitindar Singh 17644e5fe368SSuraj Jitindar Singh rc = spapr_caps_pre_save(opaque); 17654e5fe368SSuraj Jitindar Singh if (rc) { 17664e5fe368SSuraj Jitindar Singh return rc; 17674e5fe368SSuraj Jitindar Singh } 17684e5fe368SSuraj Jitindar Singh 17694e5fe368SSuraj Jitindar Singh return 0; 17704e5fe368SSuraj Jitindar Singh } 17714e5fe368SSuraj Jitindar Singh 1772880ae7deSDavid Gibson static bool version_before_3(void *opaque, int version_id) 1773880ae7deSDavid Gibson { 1774880ae7deSDavid Gibson return version_id < 3; 1775880ae7deSDavid Gibson } 1776880ae7deSDavid Gibson 1777fd38804bSDaniel Henrique Barboza static bool spapr_pending_events_needed(void *opaque) 1778fd38804bSDaniel Henrique Barboza { 1779ce2918cbSDavid Gibson SpaprMachineState *spapr = (SpaprMachineState *)opaque; 1780fd38804bSDaniel Henrique Barboza return !QTAILQ_EMPTY(&spapr->pending_events); 1781fd38804bSDaniel Henrique Barboza } 1782fd38804bSDaniel Henrique Barboza 1783fd38804bSDaniel Henrique Barboza static const VMStateDescription vmstate_spapr_event_entry = { 1784fd38804bSDaniel Henrique Barboza .name = "spapr_event_log_entry", 1785fd38804bSDaniel Henrique Barboza .version_id = 1, 1786fd38804bSDaniel Henrique Barboza .minimum_version_id = 1, 1787fd38804bSDaniel Henrique Barboza .fields = (VMStateField[]) { 1788ce2918cbSDavid Gibson VMSTATE_UINT32(summary, SpaprEventLogEntry), 1789ce2918cbSDavid Gibson VMSTATE_UINT32(extended_length, SpaprEventLogEntry), 1790ce2918cbSDavid Gibson VMSTATE_VBUFFER_ALLOC_UINT32(extended_log, SpaprEventLogEntry, 0, 17915341258eSDavid Gibson NULL, extended_length), 1792fd38804bSDaniel Henrique Barboza VMSTATE_END_OF_LIST() 1793fd38804bSDaniel Henrique Barboza }, 1794fd38804bSDaniel Henrique Barboza }; 1795fd38804bSDaniel Henrique Barboza 1796fd38804bSDaniel Henrique Barboza static const VMStateDescription vmstate_spapr_pending_events = { 1797fd38804bSDaniel Henrique Barboza .name = "spapr_pending_events", 1798fd38804bSDaniel Henrique Barboza .version_id = 1, 1799fd38804bSDaniel Henrique Barboza .minimum_version_id = 1, 1800fd38804bSDaniel Henrique Barboza .needed = spapr_pending_events_needed, 1801fd38804bSDaniel Henrique Barboza .fields = (VMStateField[]) { 1802ce2918cbSDavid Gibson VMSTATE_QTAILQ_V(pending_events, SpaprMachineState, 1, 1803ce2918cbSDavid Gibson vmstate_spapr_event_entry, SpaprEventLogEntry, next), 1804fd38804bSDaniel Henrique Barboza VMSTATE_END_OF_LIST() 1805fd38804bSDaniel Henrique Barboza }, 1806fd38804bSDaniel Henrique Barboza }; 1807fd38804bSDaniel Henrique Barboza 180862ef3760SMichael Roth static bool spapr_ov5_cas_needed(void *opaque) 180962ef3760SMichael Roth { 1810ce2918cbSDavid Gibson SpaprMachineState *spapr = opaque; 1811ce2918cbSDavid Gibson SpaprOptionVector *ov5_mask = spapr_ovec_new(); 181262ef3760SMichael Roth bool cas_needed; 181362ef3760SMichael Roth 1814ce2918cbSDavid Gibson /* Prior to the introduction of SpaprOptionVector, we had two option 181562ef3760SMichael Roth * vectors we dealt with: OV5_FORM1_AFFINITY, and OV5_DRCONF_MEMORY. 181662ef3760SMichael Roth * Both of these options encode machine topology into the device-tree 181762ef3760SMichael Roth * in such a way that the now-booted OS should still be able to interact 181862ef3760SMichael Roth * appropriately with QEMU regardless of what options were actually 181962ef3760SMichael Roth * negotiatied on the source side. 182062ef3760SMichael Roth * 182162ef3760SMichael Roth * As such, we can avoid migrating the CAS-negotiated options if these 182262ef3760SMichael Roth * are the only options available on the current machine/platform. 182362ef3760SMichael Roth * Since these are the only options available for pseries-2.7 and 182462ef3760SMichael Roth * earlier, this allows us to maintain old->new/new->old migration 182562ef3760SMichael Roth * compatibility. 182662ef3760SMichael Roth * 182762ef3760SMichael Roth * For QEMU 2.8+, there are additional CAS-negotiatable options available 182862ef3760SMichael Roth * via default pseries-2.8 machines and explicit command-line parameters. 182962ef3760SMichael Roth * Some of these options, like OV5_HP_EVT, *do* require QEMU to be aware 183062ef3760SMichael Roth * of the actual CAS-negotiated values to continue working properly. For 183162ef3760SMichael Roth * example, availability of memory unplug depends on knowing whether 183262ef3760SMichael Roth * OV5_HP_EVT was negotiated via CAS. 183362ef3760SMichael Roth * 183462ef3760SMichael Roth * Thus, for any cases where the set of available CAS-negotiatable 183562ef3760SMichael Roth * options extends beyond OV5_FORM1_AFFINITY and OV5_DRCONF_MEMORY, we 1836aef19c04SGreg Kurz * include the CAS-negotiated options in the migration stream, unless 1837aef19c04SGreg Kurz * if they affect boot time behaviour only. 183862ef3760SMichael Roth */ 183962ef3760SMichael Roth spapr_ovec_set(ov5_mask, OV5_FORM1_AFFINITY); 184062ef3760SMichael Roth spapr_ovec_set(ov5_mask, OV5_DRCONF_MEMORY); 1841aef19c04SGreg Kurz spapr_ovec_set(ov5_mask, OV5_DRMEM_V2); 184262ef3760SMichael Roth 1843d1d32d62SDavid Gibson /* We need extra information if we have any bits outside the mask 1844d1d32d62SDavid Gibson * defined above */ 1845d1d32d62SDavid Gibson cas_needed = !spapr_ovec_subset(spapr->ov5, ov5_mask); 184662ef3760SMichael Roth 184762ef3760SMichael Roth spapr_ovec_cleanup(ov5_mask); 184862ef3760SMichael Roth 184962ef3760SMichael Roth return cas_needed; 185062ef3760SMichael Roth } 185162ef3760SMichael Roth 185262ef3760SMichael Roth static const VMStateDescription vmstate_spapr_ov5_cas = { 185362ef3760SMichael Roth .name = "spapr_option_vector_ov5_cas", 185462ef3760SMichael Roth .version_id = 1, 185562ef3760SMichael Roth .minimum_version_id = 1, 185662ef3760SMichael Roth .needed = spapr_ov5_cas_needed, 185762ef3760SMichael Roth .fields = (VMStateField[]) { 1858ce2918cbSDavid Gibson VMSTATE_STRUCT_POINTER_V(ov5_cas, SpaprMachineState, 1, 1859ce2918cbSDavid Gibson vmstate_spapr_ovec, SpaprOptionVector), 186062ef3760SMichael Roth VMSTATE_END_OF_LIST() 186162ef3760SMichael Roth }, 186262ef3760SMichael Roth }; 186362ef3760SMichael Roth 18649861bb3eSSuraj Jitindar Singh static bool spapr_patb_entry_needed(void *opaque) 18659861bb3eSSuraj Jitindar Singh { 1866ce2918cbSDavid Gibson SpaprMachineState *spapr = opaque; 18679861bb3eSSuraj Jitindar Singh 18689861bb3eSSuraj Jitindar Singh return !!spapr->patb_entry; 18699861bb3eSSuraj Jitindar Singh } 18709861bb3eSSuraj Jitindar Singh 18719861bb3eSSuraj Jitindar Singh static const VMStateDescription vmstate_spapr_patb_entry = { 18729861bb3eSSuraj Jitindar Singh .name = "spapr_patb_entry", 18739861bb3eSSuraj Jitindar Singh .version_id = 1, 18749861bb3eSSuraj Jitindar Singh .minimum_version_id = 1, 18759861bb3eSSuraj Jitindar Singh .needed = spapr_patb_entry_needed, 18769861bb3eSSuraj Jitindar Singh .fields = (VMStateField[]) { 1877ce2918cbSDavid Gibson VMSTATE_UINT64(patb_entry, SpaprMachineState), 18789861bb3eSSuraj Jitindar Singh VMSTATE_END_OF_LIST() 18799861bb3eSSuraj Jitindar Singh }, 18809861bb3eSSuraj Jitindar Singh }; 18819861bb3eSSuraj Jitindar Singh 188282cffa2eSCédric Le Goater static bool spapr_irq_map_needed(void *opaque) 188382cffa2eSCédric Le Goater { 1884ce2918cbSDavid Gibson SpaprMachineState *spapr = opaque; 188582cffa2eSCédric Le Goater 188682cffa2eSCédric Le Goater return spapr->irq_map && !bitmap_empty(spapr->irq_map, spapr->irq_map_nr); 188782cffa2eSCédric Le Goater } 188882cffa2eSCédric Le Goater 188982cffa2eSCédric Le Goater static const VMStateDescription vmstate_spapr_irq_map = { 189082cffa2eSCédric Le Goater .name = "spapr_irq_map", 189182cffa2eSCédric Le Goater .version_id = 1, 189282cffa2eSCédric Le Goater .minimum_version_id = 1, 189382cffa2eSCédric Le Goater .needed = spapr_irq_map_needed, 189482cffa2eSCédric Le Goater .fields = (VMStateField[]) { 1895ce2918cbSDavid Gibson VMSTATE_BITMAP(irq_map, SpaprMachineState, 0, irq_map_nr), 189682cffa2eSCédric Le Goater VMSTATE_END_OF_LIST() 189782cffa2eSCédric Le Goater }, 189882cffa2eSCédric Le Goater }; 189982cffa2eSCédric Le Goater 1900fea35ca4SAlexey Kardashevskiy static bool spapr_dtb_needed(void *opaque) 1901fea35ca4SAlexey Kardashevskiy { 1902ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(opaque); 1903fea35ca4SAlexey Kardashevskiy 1904fea35ca4SAlexey Kardashevskiy return smc->update_dt_enabled; 1905fea35ca4SAlexey Kardashevskiy } 1906fea35ca4SAlexey Kardashevskiy 1907fea35ca4SAlexey Kardashevskiy static int spapr_dtb_pre_load(void *opaque) 1908fea35ca4SAlexey Kardashevskiy { 1909ce2918cbSDavid Gibson SpaprMachineState *spapr = (SpaprMachineState *)opaque; 1910fea35ca4SAlexey Kardashevskiy 1911fea35ca4SAlexey Kardashevskiy g_free(spapr->fdt_blob); 1912fea35ca4SAlexey Kardashevskiy spapr->fdt_blob = NULL; 1913fea35ca4SAlexey Kardashevskiy spapr->fdt_size = 0; 1914fea35ca4SAlexey Kardashevskiy 1915fea35ca4SAlexey Kardashevskiy return 0; 1916fea35ca4SAlexey Kardashevskiy } 1917fea35ca4SAlexey Kardashevskiy 1918fea35ca4SAlexey Kardashevskiy static const VMStateDescription vmstate_spapr_dtb = { 1919fea35ca4SAlexey Kardashevskiy .name = "spapr_dtb", 1920fea35ca4SAlexey Kardashevskiy .version_id = 1, 1921fea35ca4SAlexey Kardashevskiy .minimum_version_id = 1, 1922fea35ca4SAlexey Kardashevskiy .needed = spapr_dtb_needed, 1923fea35ca4SAlexey Kardashevskiy .pre_load = spapr_dtb_pre_load, 1924fea35ca4SAlexey Kardashevskiy .fields = (VMStateField[]) { 1925ce2918cbSDavid Gibson VMSTATE_UINT32(fdt_initial_size, SpaprMachineState), 1926ce2918cbSDavid Gibson VMSTATE_UINT32(fdt_size, SpaprMachineState), 1927ce2918cbSDavid Gibson VMSTATE_VBUFFER_ALLOC_UINT32(fdt_blob, SpaprMachineState, 0, NULL, 1928fea35ca4SAlexey Kardashevskiy fdt_size), 1929fea35ca4SAlexey Kardashevskiy VMSTATE_END_OF_LIST() 1930fea35ca4SAlexey Kardashevskiy }, 1931fea35ca4SAlexey Kardashevskiy }; 1932fea35ca4SAlexey Kardashevskiy 19332500fb42SAravinda Prasad static bool spapr_fwnmi_needed(void *opaque) 19342500fb42SAravinda Prasad { 19352500fb42SAravinda Prasad SpaprMachineState *spapr = (SpaprMachineState *)opaque; 19362500fb42SAravinda Prasad 19378af7e1feSNicholas Piggin return spapr->fwnmi_machine_check_addr != -1; 19382500fb42SAravinda Prasad } 19392500fb42SAravinda Prasad 19402500fb42SAravinda Prasad static int spapr_fwnmi_pre_save(void *opaque) 19412500fb42SAravinda Prasad { 19422500fb42SAravinda Prasad SpaprMachineState *spapr = (SpaprMachineState *)opaque; 19432500fb42SAravinda Prasad 19442500fb42SAravinda Prasad /* 19452500fb42SAravinda Prasad * Check if machine check handling is in progress and print a 19462500fb42SAravinda Prasad * warning message. 19472500fb42SAravinda Prasad */ 19488af7e1feSNicholas Piggin if (spapr->fwnmi_machine_check_interlock != -1) { 19492500fb42SAravinda Prasad warn_report("A machine check is being handled during migration. The" 19502500fb42SAravinda Prasad "handler may run and log hardware error on the destination"); 19512500fb42SAravinda Prasad } 19522500fb42SAravinda Prasad 19532500fb42SAravinda Prasad return 0; 19542500fb42SAravinda Prasad } 19552500fb42SAravinda Prasad 19568af7e1feSNicholas Piggin static const VMStateDescription vmstate_spapr_fwnmi = { 19578af7e1feSNicholas Piggin .name = "spapr_fwnmi", 19582500fb42SAravinda Prasad .version_id = 1, 19592500fb42SAravinda Prasad .minimum_version_id = 1, 19602500fb42SAravinda Prasad .needed = spapr_fwnmi_needed, 19612500fb42SAravinda Prasad .pre_save = spapr_fwnmi_pre_save, 19622500fb42SAravinda Prasad .fields = (VMStateField[]) { 1963edfdbf9cSNicholas Piggin VMSTATE_UINT64(fwnmi_system_reset_addr, SpaprMachineState), 19648af7e1feSNicholas Piggin VMSTATE_UINT64(fwnmi_machine_check_addr, SpaprMachineState), 19658af7e1feSNicholas Piggin VMSTATE_INT32(fwnmi_machine_check_interlock, SpaprMachineState), 19662500fb42SAravinda Prasad VMSTATE_END_OF_LIST() 19672500fb42SAravinda Prasad }, 19682500fb42SAravinda Prasad }; 19692500fb42SAravinda Prasad 19704be21d56SDavid Gibson static const VMStateDescription vmstate_spapr = { 19714be21d56SDavid Gibson .name = "spapr", 1972880ae7deSDavid Gibson .version_id = 3, 19734be21d56SDavid Gibson .minimum_version_id = 1, 19744e5fe368SSuraj Jitindar Singh .pre_load = spapr_pre_load, 1975880ae7deSDavid Gibson .post_load = spapr_post_load, 19764e5fe368SSuraj Jitindar Singh .pre_save = spapr_pre_save, 19774be21d56SDavid Gibson .fields = (VMStateField[]) { 1978880ae7deSDavid Gibson /* used to be @next_irq */ 1979880ae7deSDavid Gibson VMSTATE_UNUSED_BUFFER(version_before_3, 0, 4), 19804be21d56SDavid Gibson 19814be21d56SDavid Gibson /* RTC offset */ 1982ce2918cbSDavid Gibson VMSTATE_UINT64_TEST(rtc_offset, SpaprMachineState, version_before_3), 1983880ae7deSDavid Gibson 1984ce2918cbSDavid Gibson VMSTATE_PPC_TIMEBASE_V(tb, SpaprMachineState, 2), 19854be21d56SDavid Gibson VMSTATE_END_OF_LIST() 19864be21d56SDavid Gibson }, 198762ef3760SMichael Roth .subsections = (const VMStateDescription*[]) { 198862ef3760SMichael Roth &vmstate_spapr_ov5_cas, 19899861bb3eSSuraj Jitindar Singh &vmstate_spapr_patb_entry, 1990fd38804bSDaniel Henrique Barboza &vmstate_spapr_pending_events, 19914e5fe368SSuraj Jitindar Singh &vmstate_spapr_cap_htm, 19924e5fe368SSuraj Jitindar Singh &vmstate_spapr_cap_vsx, 19934e5fe368SSuraj Jitindar Singh &vmstate_spapr_cap_dfp, 19948f38eaf8SSuraj Jitindar Singh &vmstate_spapr_cap_cfpc, 199509114fd8SSuraj Jitindar Singh &vmstate_spapr_cap_sbbc, 19964be8d4e7SSuraj Jitindar Singh &vmstate_spapr_cap_ibs, 199764d4a534SDavid Gibson &vmstate_spapr_cap_hpt_maxpagesize, 199882cffa2eSCédric Le Goater &vmstate_spapr_irq_map, 1999b9a477b7SSuraj Jitindar Singh &vmstate_spapr_cap_nested_kvm_hv, 2000fea35ca4SAlexey Kardashevskiy &vmstate_spapr_dtb, 2001c982f5cfSSuraj Jitindar Singh &vmstate_spapr_cap_large_decr, 20028ff43ee4SSuraj Jitindar Singh &vmstate_spapr_cap_ccf_assist, 20039d953ce4SAravinda Prasad &vmstate_spapr_cap_fwnmi, 20048af7e1feSNicholas Piggin &vmstate_spapr_fwnmi, 200562ef3760SMichael Roth NULL 200662ef3760SMichael Roth } 20074be21d56SDavid Gibson }; 20084be21d56SDavid Gibson 20094be21d56SDavid Gibson static int htab_save_setup(QEMUFile *f, void *opaque) 20104be21d56SDavid Gibson { 2011ce2918cbSDavid Gibson SpaprMachineState *spapr = opaque; 20124be21d56SDavid Gibson 20134be21d56SDavid Gibson /* "Iteration" header */ 20143a384297SBharata B Rao if (!spapr->htab_shift) { 20153a384297SBharata B Rao qemu_put_be32(f, -1); 20163a384297SBharata B Rao } else { 20174be21d56SDavid Gibson qemu_put_be32(f, spapr->htab_shift); 20183a384297SBharata B Rao } 20194be21d56SDavid Gibson 2020e68cb8b4SAlexey Kardashevskiy if (spapr->htab) { 2021e68cb8b4SAlexey Kardashevskiy spapr->htab_save_index = 0; 2022e68cb8b4SAlexey Kardashevskiy spapr->htab_first_pass = true; 2023e68cb8b4SAlexey Kardashevskiy } else { 20243a384297SBharata B Rao if (spapr->htab_shift) { 2025e68cb8b4SAlexey Kardashevskiy assert(kvm_enabled()); 20264be21d56SDavid Gibson } 20273a384297SBharata B Rao } 20284be21d56SDavid Gibson 2029e68cb8b4SAlexey Kardashevskiy 2030e68cb8b4SAlexey Kardashevskiy return 0; 2031e68cb8b4SAlexey Kardashevskiy } 20324be21d56SDavid Gibson 2033ce2918cbSDavid Gibson static void htab_save_chunk(QEMUFile *f, SpaprMachineState *spapr, 2034332f7721SGreg Kurz int chunkstart, int n_valid, int n_invalid) 2035332f7721SGreg Kurz { 2036332f7721SGreg Kurz qemu_put_be32(f, chunkstart); 2037332f7721SGreg Kurz qemu_put_be16(f, n_valid); 2038332f7721SGreg Kurz qemu_put_be16(f, n_invalid); 2039332f7721SGreg Kurz qemu_put_buffer(f, HPTE(spapr->htab, chunkstart), 2040332f7721SGreg Kurz HASH_PTE_SIZE_64 * n_valid); 2041332f7721SGreg Kurz } 2042332f7721SGreg Kurz 2043332f7721SGreg Kurz static void htab_save_end_marker(QEMUFile *f) 2044332f7721SGreg Kurz { 2045332f7721SGreg Kurz qemu_put_be32(f, 0); 2046332f7721SGreg Kurz qemu_put_be16(f, 0); 2047332f7721SGreg Kurz qemu_put_be16(f, 0); 2048332f7721SGreg Kurz } 2049332f7721SGreg Kurz 2050ce2918cbSDavid Gibson static void htab_save_first_pass(QEMUFile *f, SpaprMachineState *spapr, 20514be21d56SDavid Gibson int64_t max_ns) 20524be21d56SDavid Gibson { 2053378bc217SDavid Gibson bool has_timeout = max_ns != -1; 20544be21d56SDavid Gibson int htabslots = HTAB_SIZE(spapr) / HASH_PTE_SIZE_64; 20554be21d56SDavid Gibson int index = spapr->htab_save_index; 2056bc72ad67SAlex Bligh int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); 20574be21d56SDavid Gibson 20584be21d56SDavid Gibson assert(spapr->htab_first_pass); 20594be21d56SDavid Gibson 20604be21d56SDavid Gibson do { 20614be21d56SDavid Gibson int chunkstart; 20624be21d56SDavid Gibson 20634be21d56SDavid Gibson /* Consume invalid HPTEs */ 20644be21d56SDavid Gibson while ((index < htabslots) 20654be21d56SDavid Gibson && !HPTE_VALID(HPTE(spapr->htab, index))) { 20664be21d56SDavid Gibson CLEAN_HPTE(HPTE(spapr->htab, index)); 206724ec2863SMarc-André Lureau index++; 20684be21d56SDavid Gibson } 20694be21d56SDavid Gibson 20704be21d56SDavid Gibson /* Consume valid HPTEs */ 20714be21d56SDavid Gibson chunkstart = index; 2072338c25b6SSamuel Mendoza-Jonas while ((index < htabslots) && (index - chunkstart < USHRT_MAX) 20734be21d56SDavid Gibson && HPTE_VALID(HPTE(spapr->htab, index))) { 20744be21d56SDavid Gibson CLEAN_HPTE(HPTE(spapr->htab, index)); 207524ec2863SMarc-André Lureau index++; 20764be21d56SDavid Gibson } 20774be21d56SDavid Gibson 20784be21d56SDavid Gibson if (index > chunkstart) { 20794be21d56SDavid Gibson int n_valid = index - chunkstart; 20804be21d56SDavid Gibson 2081332f7721SGreg Kurz htab_save_chunk(f, spapr, chunkstart, n_valid, 0); 20824be21d56SDavid Gibson 2083378bc217SDavid Gibson if (has_timeout && 2084378bc217SDavid Gibson (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) > max_ns) { 20854be21d56SDavid Gibson break; 20864be21d56SDavid Gibson } 20874be21d56SDavid Gibson } 20884be21d56SDavid Gibson } while ((index < htabslots) && !qemu_file_rate_limit(f)); 20894be21d56SDavid Gibson 20904be21d56SDavid Gibson if (index >= htabslots) { 20914be21d56SDavid Gibson assert(index == htabslots); 20924be21d56SDavid Gibson index = 0; 20934be21d56SDavid Gibson spapr->htab_first_pass = false; 20944be21d56SDavid Gibson } 20954be21d56SDavid Gibson spapr->htab_save_index = index; 20964be21d56SDavid Gibson } 20974be21d56SDavid Gibson 2098ce2918cbSDavid Gibson static int htab_save_later_pass(QEMUFile *f, SpaprMachineState *spapr, 20994be21d56SDavid Gibson int64_t max_ns) 21004be21d56SDavid Gibson { 21014be21d56SDavid Gibson bool final = max_ns < 0; 21024be21d56SDavid Gibson int htabslots = HTAB_SIZE(spapr) / HASH_PTE_SIZE_64; 21034be21d56SDavid Gibson int examined = 0, sent = 0; 21044be21d56SDavid Gibson int index = spapr->htab_save_index; 2105bc72ad67SAlex Bligh int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); 21064be21d56SDavid Gibson 21074be21d56SDavid Gibson assert(!spapr->htab_first_pass); 21084be21d56SDavid Gibson 21094be21d56SDavid Gibson do { 21104be21d56SDavid Gibson int chunkstart, invalidstart; 21114be21d56SDavid Gibson 21124be21d56SDavid Gibson /* Consume non-dirty HPTEs */ 21134be21d56SDavid Gibson while ((index < htabslots) 21144be21d56SDavid Gibson && !HPTE_DIRTY(HPTE(spapr->htab, index))) { 21154be21d56SDavid Gibson index++; 21164be21d56SDavid Gibson examined++; 21174be21d56SDavid Gibson } 21184be21d56SDavid Gibson 21194be21d56SDavid Gibson chunkstart = index; 21204be21d56SDavid Gibson /* Consume valid dirty HPTEs */ 2121338c25b6SSamuel Mendoza-Jonas while ((index < htabslots) && (index - chunkstart < USHRT_MAX) 21224be21d56SDavid Gibson && HPTE_DIRTY(HPTE(spapr->htab, index)) 21234be21d56SDavid Gibson && HPTE_VALID(HPTE(spapr->htab, index))) { 21244be21d56SDavid Gibson CLEAN_HPTE(HPTE(spapr->htab, index)); 21254be21d56SDavid Gibson index++; 21264be21d56SDavid Gibson examined++; 21274be21d56SDavid Gibson } 21284be21d56SDavid Gibson 21294be21d56SDavid Gibson invalidstart = index; 21304be21d56SDavid Gibson /* Consume invalid dirty HPTEs */ 2131338c25b6SSamuel Mendoza-Jonas while ((index < htabslots) && (index - invalidstart < USHRT_MAX) 21324be21d56SDavid Gibson && HPTE_DIRTY(HPTE(spapr->htab, index)) 21334be21d56SDavid Gibson && !HPTE_VALID(HPTE(spapr->htab, index))) { 21344be21d56SDavid Gibson CLEAN_HPTE(HPTE(spapr->htab, index)); 21354be21d56SDavid Gibson index++; 21364be21d56SDavid Gibson examined++; 21374be21d56SDavid Gibson } 21384be21d56SDavid Gibson 21394be21d56SDavid Gibson if (index > chunkstart) { 21404be21d56SDavid Gibson int n_valid = invalidstart - chunkstart; 21414be21d56SDavid Gibson int n_invalid = index - invalidstart; 21424be21d56SDavid Gibson 2143332f7721SGreg Kurz htab_save_chunk(f, spapr, chunkstart, n_valid, n_invalid); 21444be21d56SDavid Gibson sent += index - chunkstart; 21454be21d56SDavid Gibson 2146bc72ad67SAlex Bligh if (!final && (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) > max_ns) { 21474be21d56SDavid Gibson break; 21484be21d56SDavid Gibson } 21494be21d56SDavid Gibson } 21504be21d56SDavid Gibson 21514be21d56SDavid Gibson if (examined >= htabslots) { 21524be21d56SDavid Gibson break; 21534be21d56SDavid Gibson } 21544be21d56SDavid Gibson 21554be21d56SDavid Gibson if (index >= htabslots) { 21564be21d56SDavid Gibson assert(index == htabslots); 21574be21d56SDavid Gibson index = 0; 21584be21d56SDavid Gibson } 21594be21d56SDavid Gibson } while ((examined < htabslots) && (!qemu_file_rate_limit(f) || final)); 21604be21d56SDavid Gibson 21614be21d56SDavid Gibson if (index >= htabslots) { 21624be21d56SDavid Gibson assert(index == htabslots); 21634be21d56SDavid Gibson index = 0; 21644be21d56SDavid Gibson } 21654be21d56SDavid Gibson 21664be21d56SDavid Gibson spapr->htab_save_index = index; 21674be21d56SDavid Gibson 2168e68cb8b4SAlexey Kardashevskiy return (examined >= htabslots) && (sent == 0) ? 1 : 0; 21694be21d56SDavid Gibson } 21704be21d56SDavid Gibson 2171e68cb8b4SAlexey Kardashevskiy #define MAX_ITERATION_NS 5000000 /* 5 ms */ 2172e68cb8b4SAlexey Kardashevskiy #define MAX_KVM_BUF_SIZE 2048 2173e68cb8b4SAlexey Kardashevskiy 21744be21d56SDavid Gibson static int htab_save_iterate(QEMUFile *f, void *opaque) 21754be21d56SDavid Gibson { 2176ce2918cbSDavid Gibson SpaprMachineState *spapr = opaque; 2177715c5407SDavid Gibson int fd; 2178e68cb8b4SAlexey Kardashevskiy int rc = 0; 21794be21d56SDavid Gibson 21804be21d56SDavid Gibson /* Iteration header */ 21813a384297SBharata B Rao if (!spapr->htab_shift) { 21823a384297SBharata B Rao qemu_put_be32(f, -1); 2183e8cd4247SLaurent Vivier return 1; 21843a384297SBharata B Rao } else { 21854be21d56SDavid Gibson qemu_put_be32(f, 0); 21863a384297SBharata B Rao } 21874be21d56SDavid Gibson 2188e68cb8b4SAlexey Kardashevskiy if (!spapr->htab) { 2189e68cb8b4SAlexey Kardashevskiy assert(kvm_enabled()); 2190e68cb8b4SAlexey Kardashevskiy 2191715c5407SDavid Gibson fd = get_htab_fd(spapr); 2192715c5407SDavid Gibson if (fd < 0) { 2193715c5407SDavid Gibson return fd; 219401a57972SSamuel Mendoza-Jonas } 219501a57972SSamuel Mendoza-Jonas 2196715c5407SDavid Gibson rc = kvmppc_save_htab(f, fd, MAX_KVM_BUF_SIZE, MAX_ITERATION_NS); 2197e68cb8b4SAlexey Kardashevskiy if (rc < 0) { 2198e68cb8b4SAlexey Kardashevskiy return rc; 2199e68cb8b4SAlexey Kardashevskiy } 2200e68cb8b4SAlexey Kardashevskiy } else if (spapr->htab_first_pass) { 22014be21d56SDavid Gibson htab_save_first_pass(f, spapr, MAX_ITERATION_NS); 22024be21d56SDavid Gibson } else { 2203e68cb8b4SAlexey Kardashevskiy rc = htab_save_later_pass(f, spapr, MAX_ITERATION_NS); 22044be21d56SDavid Gibson } 22054be21d56SDavid Gibson 2206332f7721SGreg Kurz htab_save_end_marker(f); 22074be21d56SDavid Gibson 2208e68cb8b4SAlexey Kardashevskiy return rc; 22094be21d56SDavid Gibson } 22104be21d56SDavid Gibson 22114be21d56SDavid Gibson static int htab_save_complete(QEMUFile *f, void *opaque) 22124be21d56SDavid Gibson { 2213ce2918cbSDavid Gibson SpaprMachineState *spapr = opaque; 2214715c5407SDavid Gibson int fd; 22154be21d56SDavid Gibson 22164be21d56SDavid Gibson /* Iteration header */ 22173a384297SBharata B Rao if (!spapr->htab_shift) { 22183a384297SBharata B Rao qemu_put_be32(f, -1); 22193a384297SBharata B Rao return 0; 22203a384297SBharata B Rao } else { 22214be21d56SDavid Gibson qemu_put_be32(f, 0); 22223a384297SBharata B Rao } 22234be21d56SDavid Gibson 2224e68cb8b4SAlexey Kardashevskiy if (!spapr->htab) { 2225e68cb8b4SAlexey Kardashevskiy int rc; 2226e68cb8b4SAlexey Kardashevskiy 2227e68cb8b4SAlexey Kardashevskiy assert(kvm_enabled()); 2228e68cb8b4SAlexey Kardashevskiy 2229715c5407SDavid Gibson fd = get_htab_fd(spapr); 2230715c5407SDavid Gibson if (fd < 0) { 2231715c5407SDavid Gibson return fd; 223201a57972SSamuel Mendoza-Jonas } 223301a57972SSamuel Mendoza-Jonas 2234715c5407SDavid Gibson rc = kvmppc_save_htab(f, fd, MAX_KVM_BUF_SIZE, -1); 2235e68cb8b4SAlexey Kardashevskiy if (rc < 0) { 2236e68cb8b4SAlexey Kardashevskiy return rc; 2237e68cb8b4SAlexey Kardashevskiy } 2238e68cb8b4SAlexey Kardashevskiy } else { 2239378bc217SDavid Gibson if (spapr->htab_first_pass) { 2240378bc217SDavid Gibson htab_save_first_pass(f, spapr, -1); 2241378bc217SDavid Gibson } 22424be21d56SDavid Gibson htab_save_later_pass(f, spapr, -1); 2243e68cb8b4SAlexey Kardashevskiy } 22444be21d56SDavid Gibson 22454be21d56SDavid Gibson /* End marker */ 2246332f7721SGreg Kurz htab_save_end_marker(f); 22474be21d56SDavid Gibson 22484be21d56SDavid Gibson return 0; 22494be21d56SDavid Gibson } 22504be21d56SDavid Gibson 22514be21d56SDavid Gibson static int htab_load(QEMUFile *f, void *opaque, int version_id) 22524be21d56SDavid Gibson { 2253ce2918cbSDavid Gibson SpaprMachineState *spapr = opaque; 22544be21d56SDavid Gibson uint32_t section_hdr; 2255e68cb8b4SAlexey Kardashevskiy int fd = -1; 225614b0d748SGreg Kurz Error *local_err = NULL; 22574be21d56SDavid Gibson 22584be21d56SDavid Gibson if (version_id < 1 || version_id > 1) { 225998a5d100SDavid Gibson error_report("htab_load() bad version"); 22604be21d56SDavid Gibson return -EINVAL; 22614be21d56SDavid Gibson } 22624be21d56SDavid Gibson 22634be21d56SDavid Gibson section_hdr = qemu_get_be32(f); 22644be21d56SDavid Gibson 22653a384297SBharata B Rao if (section_hdr == -1) { 22663a384297SBharata B Rao spapr_free_hpt(spapr); 22673a384297SBharata B Rao return 0; 22683a384297SBharata B Rao } 22693a384297SBharata B Rao 22704be21d56SDavid Gibson if (section_hdr) { 2271a4e3a7c0SGreg Kurz int ret; 2272a4e3a7c0SGreg Kurz 2273c5f54f3eSDavid Gibson /* First section gives the htab size */ 2274a4e3a7c0SGreg Kurz ret = spapr_reallocate_hpt(spapr, section_hdr, &local_err); 2275a4e3a7c0SGreg Kurz if (ret < 0) { 2276c5f54f3eSDavid Gibson error_report_err(local_err); 2277a4e3a7c0SGreg Kurz return ret; 22784be21d56SDavid Gibson } 22794be21d56SDavid Gibson return 0; 22804be21d56SDavid Gibson } 22814be21d56SDavid Gibson 2282e68cb8b4SAlexey Kardashevskiy if (!spapr->htab) { 2283e68cb8b4SAlexey Kardashevskiy assert(kvm_enabled()); 2284e68cb8b4SAlexey Kardashevskiy 228514b0d748SGreg Kurz fd = kvmppc_get_htab_fd(true, 0, &local_err); 2286e68cb8b4SAlexey Kardashevskiy if (fd < 0) { 228714b0d748SGreg Kurz error_report_err(local_err); 228882be8e73SGreg Kurz return fd; 2289e68cb8b4SAlexey Kardashevskiy } 2290e68cb8b4SAlexey Kardashevskiy } 2291e68cb8b4SAlexey Kardashevskiy 22924be21d56SDavid Gibson while (true) { 22934be21d56SDavid Gibson uint32_t index; 22944be21d56SDavid Gibson uint16_t n_valid, n_invalid; 22954be21d56SDavid Gibson 22964be21d56SDavid Gibson index = qemu_get_be32(f); 22974be21d56SDavid Gibson n_valid = qemu_get_be16(f); 22984be21d56SDavid Gibson n_invalid = qemu_get_be16(f); 22994be21d56SDavid Gibson 23004be21d56SDavid Gibson if ((index == 0) && (n_valid == 0) && (n_invalid == 0)) { 23014be21d56SDavid Gibson /* End of Stream */ 23024be21d56SDavid Gibson break; 23034be21d56SDavid Gibson } 23044be21d56SDavid Gibson 2305e68cb8b4SAlexey Kardashevskiy if ((index + n_valid + n_invalid) > 23064be21d56SDavid Gibson (HTAB_SIZE(spapr) / HASH_PTE_SIZE_64)) { 23074be21d56SDavid Gibson /* Bad index in stream */ 230898a5d100SDavid Gibson error_report( 230998a5d100SDavid Gibson "htab_load() bad index %d (%hd+%hd entries) in htab stream (htab_shift=%d)", 231098a5d100SDavid Gibson index, n_valid, n_invalid, spapr->htab_shift); 23114be21d56SDavid Gibson return -EINVAL; 23124be21d56SDavid Gibson } 23134be21d56SDavid Gibson 2314e68cb8b4SAlexey Kardashevskiy if (spapr->htab) { 23154be21d56SDavid Gibson if (n_valid) { 23164be21d56SDavid Gibson qemu_get_buffer(f, HPTE(spapr->htab, index), 23174be21d56SDavid Gibson HASH_PTE_SIZE_64 * n_valid); 23184be21d56SDavid Gibson } 23194be21d56SDavid Gibson if (n_invalid) { 23204be21d56SDavid Gibson memset(HPTE(spapr->htab, index + n_valid), 0, 23214be21d56SDavid Gibson HASH_PTE_SIZE_64 * n_invalid); 23224be21d56SDavid Gibson } 2323e68cb8b4SAlexey Kardashevskiy } else { 2324e68cb8b4SAlexey Kardashevskiy int rc; 2325e68cb8b4SAlexey Kardashevskiy 2326e68cb8b4SAlexey Kardashevskiy assert(fd >= 0); 2327e68cb8b4SAlexey Kardashevskiy 23280a06e4d6SGreg Kurz rc = kvmppc_load_htab_chunk(f, fd, index, n_valid, n_invalid, 23290a06e4d6SGreg Kurz &local_err); 2330e68cb8b4SAlexey Kardashevskiy if (rc < 0) { 23310a06e4d6SGreg Kurz error_report_err(local_err); 2332e68cb8b4SAlexey Kardashevskiy return rc; 2333e68cb8b4SAlexey Kardashevskiy } 2334e68cb8b4SAlexey Kardashevskiy } 2335e68cb8b4SAlexey Kardashevskiy } 2336e68cb8b4SAlexey Kardashevskiy 2337e68cb8b4SAlexey Kardashevskiy if (!spapr->htab) { 2338e68cb8b4SAlexey Kardashevskiy assert(fd >= 0); 2339e68cb8b4SAlexey Kardashevskiy close(fd); 23404be21d56SDavid Gibson } 23414be21d56SDavid Gibson 23424be21d56SDavid Gibson return 0; 23434be21d56SDavid Gibson } 23444be21d56SDavid Gibson 234570f794fcSJuan Quintela static void htab_save_cleanup(void *opaque) 2346c573fc03SThomas Huth { 2347ce2918cbSDavid Gibson SpaprMachineState *spapr = opaque; 2348c573fc03SThomas Huth 2349c573fc03SThomas Huth close_htab_fd(spapr); 2350c573fc03SThomas Huth } 2351c573fc03SThomas Huth 23524be21d56SDavid Gibson static SaveVMHandlers savevm_htab_handlers = { 23539907e842SJuan Quintela .save_setup = htab_save_setup, 23544be21d56SDavid Gibson .save_live_iterate = htab_save_iterate, 2355a3e06c3dSDr. David Alan Gilbert .save_live_complete_precopy = htab_save_complete, 235670f794fcSJuan Quintela .save_cleanup = htab_save_cleanup, 23574be21d56SDavid Gibson .load_state = htab_load, 23584be21d56SDavid Gibson }; 23594be21d56SDavid Gibson 23605b2128d2SAlexander Graf static void spapr_boot_set(void *opaque, const char *boot_device, 23615b2128d2SAlexander Graf Error **errp) 23625b2128d2SAlexander Graf { 2363c86c1affSDaniel Henrique Barboza MachineState *machine = MACHINE(opaque); 23645b2128d2SAlexander Graf machine->boot_order = g_strdup(boot_device); 23655b2128d2SAlexander Graf } 23665b2128d2SAlexander Graf 2367ce2918cbSDavid Gibson static void spapr_create_lmb_dr_connectors(SpaprMachineState *spapr) 2368224245bfSDavid Gibson { 2369224245bfSDavid Gibson MachineState *machine = MACHINE(spapr); 2370224245bfSDavid Gibson uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE; 2371e8f986fcSBharata B Rao uint32_t nr_lmbs = (machine->maxram_size - machine->ram_size)/lmb_size; 2372224245bfSDavid Gibson int i; 2373224245bfSDavid Gibson 2374224245bfSDavid Gibson for (i = 0; i < nr_lmbs; i++) { 2375224245bfSDavid Gibson uint64_t addr; 2376224245bfSDavid Gibson 2377b0c14ec4SDavid Hildenbrand addr = i * lmb_size + machine->device_memory->base; 23786caf3ac6SDavid Gibson spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_LMB, 2379224245bfSDavid Gibson addr / lmb_size); 2380224245bfSDavid Gibson } 2381224245bfSDavid Gibson } 2382224245bfSDavid Gibson 2383224245bfSDavid Gibson /* 2384224245bfSDavid Gibson * If RAM size, maxmem size and individual node mem sizes aren't aligned 2385224245bfSDavid Gibson * to SPAPR_MEMORY_BLOCK_SIZE(256MB), then refuse to start the guest 2386224245bfSDavid Gibson * since we can't support such unaligned sizes with DRCONF_MEMORY. 2387224245bfSDavid Gibson */ 23887c150d6fSDavid Gibson static void spapr_validate_node_memory(MachineState *machine, Error **errp) 2389224245bfSDavid Gibson { 2390224245bfSDavid Gibson int i; 2391224245bfSDavid Gibson 23927c150d6fSDavid Gibson if (machine->ram_size % SPAPR_MEMORY_BLOCK_SIZE) { 23937c150d6fSDavid Gibson error_setg(errp, "Memory size 0x" RAM_ADDR_FMT 2394ab3dd749SPhilippe Mathieu-Daudé " is not aligned to %" PRIu64 " MiB", 23957c150d6fSDavid Gibson machine->ram_size, 2396d23b6caaSPhilippe Mathieu-Daudé SPAPR_MEMORY_BLOCK_SIZE / MiB); 23977c150d6fSDavid Gibson return; 23987c150d6fSDavid Gibson } 23997c150d6fSDavid Gibson 24007c150d6fSDavid Gibson if (machine->maxram_size % SPAPR_MEMORY_BLOCK_SIZE) { 24017c150d6fSDavid Gibson error_setg(errp, "Maximum memory size 0x" RAM_ADDR_FMT 2402ab3dd749SPhilippe Mathieu-Daudé " is not aligned to %" PRIu64 " MiB", 24037c150d6fSDavid Gibson machine->ram_size, 2404d23b6caaSPhilippe Mathieu-Daudé SPAPR_MEMORY_BLOCK_SIZE / MiB); 24057c150d6fSDavid Gibson return; 2406224245bfSDavid Gibson } 2407224245bfSDavid Gibson 2408aa570207STao Xu for (i = 0; i < machine->numa_state->num_nodes; i++) { 24097e721e7bSTao Xu if (machine->numa_state->nodes[i].node_mem % SPAPR_MEMORY_BLOCK_SIZE) { 24107c150d6fSDavid Gibson error_setg(errp, 24117c150d6fSDavid Gibson "Node %d memory size 0x%" PRIx64 2412ab3dd749SPhilippe Mathieu-Daudé " is not aligned to %" PRIu64 " MiB", 24137e721e7bSTao Xu i, machine->numa_state->nodes[i].node_mem, 2414d23b6caaSPhilippe Mathieu-Daudé SPAPR_MEMORY_BLOCK_SIZE / MiB); 24157c150d6fSDavid Gibson return; 2416224245bfSDavid Gibson } 2417224245bfSDavid Gibson } 2418224245bfSDavid Gibson } 2419224245bfSDavid Gibson 2420535455fdSIgor Mammedov /* find cpu slot in machine->possible_cpus by core_id */ 2421535455fdSIgor Mammedov static CPUArchId *spapr_find_cpu_slot(MachineState *ms, uint32_t id, int *idx) 2422535455fdSIgor Mammedov { 2423fe6b6346SLike Xu int index = id / ms->smp.threads; 2424535455fdSIgor Mammedov 2425535455fdSIgor Mammedov if (index >= ms->possible_cpus->len) { 2426535455fdSIgor Mammedov return NULL; 2427535455fdSIgor Mammedov } 2428535455fdSIgor Mammedov if (idx) { 2429535455fdSIgor Mammedov *idx = index; 2430535455fdSIgor Mammedov } 2431535455fdSIgor Mammedov return &ms->possible_cpus->cpus[index]; 2432535455fdSIgor Mammedov } 2433535455fdSIgor Mammedov 2434ce2918cbSDavid Gibson static void spapr_set_vsmt_mode(SpaprMachineState *spapr, Error **errp) 2435fa98fbfcSSam Bobroff { 2436fe6b6346SLike Xu MachineState *ms = MACHINE(spapr); 243729cb4187SGreg Kurz SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); 2438fa98fbfcSSam Bobroff Error *local_err = NULL; 2439fa98fbfcSSam Bobroff bool vsmt_user = !!spapr->vsmt; 2440fa98fbfcSSam Bobroff int kvm_smt = kvmppc_smt_threads(); 2441fa98fbfcSSam Bobroff int ret; 2442fe6b6346SLike Xu unsigned int smp_threads = ms->smp.threads; 2443fa98fbfcSSam Bobroff 2444fa98fbfcSSam Bobroff if (!kvm_enabled() && (smp_threads > 1)) { 2445dcfe4805SMarkus Armbruster error_setg(errp, "TCG cannot support more than 1 thread/core " 2446fa98fbfcSSam Bobroff "on a pseries machine"); 2447dcfe4805SMarkus Armbruster return; 2448fa98fbfcSSam Bobroff } 2449fa98fbfcSSam Bobroff if (!is_power_of_2(smp_threads)) { 2450dcfe4805SMarkus Armbruster error_setg(errp, "Cannot support %d threads/core on a pseries " 2451fa98fbfcSSam Bobroff "machine because it must be a power of 2", smp_threads); 2452dcfe4805SMarkus Armbruster return; 2453fa98fbfcSSam Bobroff } 2454fa98fbfcSSam Bobroff 2455fa98fbfcSSam Bobroff /* Detemine the VSMT mode to use: */ 2456fa98fbfcSSam Bobroff if (vsmt_user) { 2457fa98fbfcSSam Bobroff if (spapr->vsmt < smp_threads) { 2458dcfe4805SMarkus Armbruster error_setg(errp, "Cannot support VSMT mode %d" 2459fa98fbfcSSam Bobroff " because it must be >= threads/core (%d)", 2460fa98fbfcSSam Bobroff spapr->vsmt, smp_threads); 2461dcfe4805SMarkus Armbruster return; 2462fa98fbfcSSam Bobroff } 2463fa98fbfcSSam Bobroff /* In this case, spapr->vsmt has been set by the command line */ 246429cb4187SGreg Kurz } else if (!smc->smp_threads_vsmt) { 24658904e5a7SDavid Gibson /* 24668904e5a7SDavid Gibson * Default VSMT value is tricky, because we need it to be as 24678904e5a7SDavid Gibson * consistent as possible (for migration), but this requires 24688904e5a7SDavid Gibson * changing it for at least some existing cases. We pick 8 as 24698904e5a7SDavid Gibson * the value that we'd get with KVM on POWER8, the 24708904e5a7SDavid Gibson * overwhelmingly common case in production systems. 24718904e5a7SDavid Gibson */ 24724ad64cbdSLaurent Vivier spapr->vsmt = MAX(8, smp_threads); 247329cb4187SGreg Kurz } else { 247429cb4187SGreg Kurz spapr->vsmt = smp_threads; 2475fa98fbfcSSam Bobroff } 2476fa98fbfcSSam Bobroff 2477fa98fbfcSSam Bobroff /* KVM: If necessary, set the SMT mode: */ 2478fa98fbfcSSam Bobroff if (kvm_enabled() && (spapr->vsmt != kvm_smt)) { 2479fa98fbfcSSam Bobroff ret = kvmppc_set_smt_threads(spapr->vsmt); 2480fa98fbfcSSam Bobroff if (ret) { 24811f20f2e0SDavid Gibson /* Looks like KVM isn't able to change VSMT mode */ 2482fa98fbfcSSam Bobroff error_setg(&local_err, 2483fa98fbfcSSam Bobroff "Failed to set KVM's VSMT mode to %d (errno %d)", 2484fa98fbfcSSam Bobroff spapr->vsmt, ret); 24851f20f2e0SDavid Gibson /* We can live with that if the default one is big enough 24861f20f2e0SDavid Gibson * for the number of threads, and a submultiple of the one 24871f20f2e0SDavid Gibson * we want. In this case we'll waste some vcpu ids, but 24881f20f2e0SDavid Gibson * behaviour will be correct */ 24891f20f2e0SDavid Gibson if ((kvm_smt >= smp_threads) && ((spapr->vsmt % kvm_smt) == 0)) { 24901f20f2e0SDavid Gibson warn_report_err(local_err); 24911f20f2e0SDavid Gibson } else { 2492fa98fbfcSSam Bobroff if (!vsmt_user) { 24931f20f2e0SDavid Gibson error_append_hint(&local_err, 24941f20f2e0SDavid Gibson "On PPC, a VM with %d threads/core" 24951f20f2e0SDavid Gibson " on a host with %d threads/core" 24961f20f2e0SDavid Gibson " requires the use of VSMT mode %d.\n", 2497fa98fbfcSSam Bobroff smp_threads, kvm_smt, spapr->vsmt); 2498fa98fbfcSSam Bobroff } 2499cdcca22aSVladimir Sementsov-Ogievskiy kvmppc_error_append_smt_possible_hint(&local_err); 2500dcfe4805SMarkus Armbruster error_propagate(errp, local_err); 2501fa98fbfcSSam Bobroff } 2502fa98fbfcSSam Bobroff } 25031f20f2e0SDavid Gibson } 2504fa98fbfcSSam Bobroff /* else TCG: nothing to do currently */ 2505fa98fbfcSSam Bobroff } 2506fa98fbfcSSam Bobroff 2507ce2918cbSDavid Gibson static void spapr_init_cpus(SpaprMachineState *spapr) 25081a5008fcSGreg Kurz { 25091a5008fcSGreg Kurz MachineState *machine = MACHINE(spapr); 25101a5008fcSGreg Kurz MachineClass *mc = MACHINE_GET_CLASS(machine); 2511ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine); 25121a5008fcSGreg Kurz const char *type = spapr_get_cpu_core_type(machine->cpu_type); 25131a5008fcSGreg Kurz const CPUArchIdList *possible_cpus; 2514fe6b6346SLike Xu unsigned int smp_cpus = machine->smp.cpus; 2515fe6b6346SLike Xu unsigned int smp_threads = machine->smp.threads; 2516fe6b6346SLike Xu unsigned int max_cpus = machine->smp.max_cpus; 25171a5008fcSGreg Kurz int boot_cores_nr = smp_cpus / smp_threads; 25181a5008fcSGreg Kurz int i; 25191a5008fcSGreg Kurz 25201a5008fcSGreg Kurz possible_cpus = mc->possible_cpu_arch_ids(machine); 25211a5008fcSGreg Kurz if (mc->has_hotpluggable_cpus) { 25221a5008fcSGreg Kurz if (smp_cpus % smp_threads) { 25231a5008fcSGreg Kurz error_report("smp_cpus (%u) must be multiple of threads (%u)", 25241a5008fcSGreg Kurz smp_cpus, smp_threads); 25251a5008fcSGreg Kurz exit(1); 25261a5008fcSGreg Kurz } 25271a5008fcSGreg Kurz if (max_cpus % smp_threads) { 25281a5008fcSGreg Kurz error_report("max_cpus (%u) must be multiple of threads (%u)", 25291a5008fcSGreg Kurz max_cpus, smp_threads); 25301a5008fcSGreg Kurz exit(1); 25311a5008fcSGreg Kurz } 25321a5008fcSGreg Kurz } else { 25331a5008fcSGreg Kurz if (max_cpus != smp_cpus) { 25341a5008fcSGreg Kurz error_report("This machine version does not support CPU hotplug"); 25351a5008fcSGreg Kurz exit(1); 25361a5008fcSGreg Kurz } 25371a5008fcSGreg Kurz boot_cores_nr = possible_cpus->len; 25381a5008fcSGreg Kurz } 25391a5008fcSGreg Kurz 25401a5008fcSGreg Kurz if (smc->pre_2_10_has_unused_icps) { 25411a5008fcSGreg Kurz int i; 25421a5008fcSGreg Kurz 25431a518e76SCédric Le Goater for (i = 0; i < spapr_max_server_number(spapr); i++) { 25441a5008fcSGreg Kurz /* Dummy entries get deregistered when real ICPState objects 25451a5008fcSGreg Kurz * are registered during CPU core hotplug. 25461a5008fcSGreg Kurz */ 25471a5008fcSGreg Kurz pre_2_10_vmstate_register_dummy_icp(i); 25481a5008fcSGreg Kurz } 25491a5008fcSGreg Kurz } 25501a5008fcSGreg Kurz 25511a5008fcSGreg Kurz for (i = 0; i < possible_cpus->len; i++) { 25521a5008fcSGreg Kurz int core_id = i * smp_threads; 25531a5008fcSGreg Kurz 25541a5008fcSGreg Kurz if (mc->has_hotpluggable_cpus) { 25551a5008fcSGreg Kurz spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_CPU, 25561a5008fcSGreg Kurz spapr_vcpu_id(spapr, core_id)); 25571a5008fcSGreg Kurz } 25581a5008fcSGreg Kurz 25591a5008fcSGreg Kurz if (i < boot_cores_nr) { 25601a5008fcSGreg Kurz Object *core = object_new(type); 25611a5008fcSGreg Kurz int nr_threads = smp_threads; 25621a5008fcSGreg Kurz 25631a5008fcSGreg Kurz /* Handle the partially filled core for older machine types */ 25641a5008fcSGreg Kurz if ((i + 1) * smp_threads >= smp_cpus) { 25651a5008fcSGreg Kurz nr_threads = smp_cpus - i * smp_threads; 25661a5008fcSGreg Kurz } 25671a5008fcSGreg Kurz 25685325cc34SMarkus Armbruster object_property_set_int(core, "nr-threads", nr_threads, 25691a5008fcSGreg Kurz &error_fatal); 25705325cc34SMarkus Armbruster object_property_set_int(core, CPU_CORE_PROP_CORE_ID, core_id, 25711a5008fcSGreg Kurz &error_fatal); 2572ce189ab2SMarkus Armbruster qdev_realize(DEVICE(core), NULL, &error_fatal); 2573ecda255eSSam Bobroff 2574ecda255eSSam Bobroff object_unref(core); 25751a5008fcSGreg Kurz } 25761a5008fcSGreg Kurz } 25771a5008fcSGreg Kurz } 25781a5008fcSGreg Kurz 2579999c9cafSGreg Kurz static PCIHostState *spapr_create_default_phb(void) 2580999c9cafSGreg Kurz { 2581999c9cafSGreg Kurz DeviceState *dev; 2582999c9cafSGreg Kurz 25833e80f690SMarkus Armbruster dev = qdev_new(TYPE_SPAPR_PCI_HOST_BRIDGE); 2584999c9cafSGreg Kurz qdev_prop_set_uint32(dev, "index", 0); 25853c6ef471SMarkus Armbruster sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); 2586999c9cafSGreg Kurz 2587999c9cafSGreg Kurz return PCI_HOST_BRIDGE(dev); 2588999c9cafSGreg Kurz } 2589999c9cafSGreg Kurz 2590425f0b7aSDavid Gibson static hwaddr spapr_rma_size(SpaprMachineState *spapr, Error **errp) 2591425f0b7aSDavid Gibson { 2592425f0b7aSDavid Gibson MachineState *machine = MACHINE(spapr); 2593425f0b7aSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); 2594425f0b7aSDavid Gibson hwaddr rma_size = machine->ram_size; 2595425f0b7aSDavid Gibson hwaddr node0_size = spapr_node0_size(machine); 2596425f0b7aSDavid Gibson 2597425f0b7aSDavid Gibson /* RMA has to fit in the first NUMA node */ 2598425f0b7aSDavid Gibson rma_size = MIN(rma_size, node0_size); 2599425f0b7aSDavid Gibson 2600425f0b7aSDavid Gibson /* 2601425f0b7aSDavid Gibson * VRMA access is via a special 1TiB SLB mapping, so the RMA can 2602425f0b7aSDavid Gibson * never exceed that 2603425f0b7aSDavid Gibson */ 2604425f0b7aSDavid Gibson rma_size = MIN(rma_size, 1 * TiB); 2605425f0b7aSDavid Gibson 2606425f0b7aSDavid Gibson /* 2607425f0b7aSDavid Gibson * Clamp the RMA size based on machine type. This is for 2608425f0b7aSDavid Gibson * migration compatibility with older qemu versions, which limited 2609425f0b7aSDavid Gibson * the RMA size for complicated and mostly bad reasons. 2610425f0b7aSDavid Gibson */ 2611425f0b7aSDavid Gibson if (smc->rma_limit) { 2612425f0b7aSDavid Gibson rma_size = MIN(rma_size, smc->rma_limit); 2613425f0b7aSDavid Gibson } 2614425f0b7aSDavid Gibson 2615425f0b7aSDavid Gibson if (rma_size < MIN_RMA_SLOF) { 2616425f0b7aSDavid Gibson error_setg(errp, 2617425f0b7aSDavid Gibson "pSeries SLOF firmware requires >= %" HWADDR_PRIx 2618425f0b7aSDavid Gibson "ldMiB guest RMA (Real Mode Area memory)", 2619425f0b7aSDavid Gibson MIN_RMA_SLOF / MiB); 2620425f0b7aSDavid Gibson return 0; 2621425f0b7aSDavid Gibson } 2622425f0b7aSDavid Gibson 2623425f0b7aSDavid Gibson return rma_size; 2624425f0b7aSDavid Gibson } 2625425f0b7aSDavid Gibson 2626ce316b51SGreg Kurz static void spapr_create_nvdimm_dr_connectors(SpaprMachineState *spapr) 2627ce316b51SGreg Kurz { 2628ce316b51SGreg Kurz MachineState *machine = MACHINE(spapr); 2629ce316b51SGreg Kurz int i; 2630ce316b51SGreg Kurz 2631ce316b51SGreg Kurz for (i = 0; i < machine->ram_slots; i++) { 2632ce316b51SGreg Kurz spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_PMEM, i); 2633ce316b51SGreg Kurz } 2634ce316b51SGreg Kurz } 2635ce316b51SGreg Kurz 263653018216SPaolo Bonzini /* pSeries LPAR / sPAPR hardware init */ 2637bcb5ce08SDavid Gibson static void spapr_machine_init(MachineState *machine) 263853018216SPaolo Bonzini { 2639ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(machine); 2640ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine); 2641ee3a71e3SShivaprasad G Bhat MachineClass *mc = MACHINE_GET_CLASS(machine); 2642cd7b9498SPaolo Bonzini const char *bios_name = machine->firmware ?: FW_FILE_NAME; 26433ef96221SMarcel Apfelbaum const char *kernel_filename = machine->kernel_filename; 26443ef96221SMarcel Apfelbaum const char *initrd_filename = machine->initrd_filename; 264553018216SPaolo Bonzini PCIHostState *phb; 264653018216SPaolo Bonzini int i; 264753018216SPaolo Bonzini MemoryRegion *sysmem = get_system_memory(); 2648b7d1f77aSBenjamin Herrenschmidt long load_limit, fw_size; 264953018216SPaolo Bonzini char *filename; 265030f4b05bSDavid Gibson Error *resize_hpt_err = NULL; 265153018216SPaolo Bonzini 26526c8ebe30SDavid Gibson /* 26536c8ebe30SDavid Gibson * if Secure VM (PEF) support is configured, then initialize it 26546c8ebe30SDavid Gibson */ 26556c8ebe30SDavid Gibson pef_kvm_init(machine->cgs, &error_fatal); 26566c8ebe30SDavid Gibson 2657226419d6SMichael S. Tsirkin msi_nonbroken = true; 265853018216SPaolo Bonzini 265953018216SPaolo Bonzini QLIST_INIT(&spapr->phbs); 26600cffce56SDavid Gibson QTAILQ_INIT(&spapr->pending_dimm_unplugs); 266153018216SPaolo Bonzini 26629f6edd06SDavid Gibson /* Determine capabilities to run with */ 26639f6edd06SDavid Gibson spapr_caps_init(spapr); 26649f6edd06SDavid Gibson 266530f4b05bSDavid Gibson kvmppc_check_papr_resize_hpt(&resize_hpt_err); 266630f4b05bSDavid Gibson if (spapr->resize_hpt == SPAPR_RESIZE_HPT_DEFAULT) { 266730f4b05bSDavid Gibson /* 266830f4b05bSDavid Gibson * If the user explicitly requested a mode we should either 266930f4b05bSDavid Gibson * supply it, or fail completely (which we do below). But if 267030f4b05bSDavid Gibson * it's not set explicitly, we reset our mode to something 267130f4b05bSDavid Gibson * that works 267230f4b05bSDavid Gibson */ 267330f4b05bSDavid Gibson if (resize_hpt_err) { 267430f4b05bSDavid Gibson spapr->resize_hpt = SPAPR_RESIZE_HPT_DISABLED; 267530f4b05bSDavid Gibson error_free(resize_hpt_err); 267630f4b05bSDavid Gibson resize_hpt_err = NULL; 267730f4b05bSDavid Gibson } else { 267830f4b05bSDavid Gibson spapr->resize_hpt = smc->resize_hpt_default; 267930f4b05bSDavid Gibson } 268030f4b05bSDavid Gibson } 268130f4b05bSDavid Gibson 268230f4b05bSDavid Gibson assert(spapr->resize_hpt != SPAPR_RESIZE_HPT_DEFAULT); 268330f4b05bSDavid Gibson 268430f4b05bSDavid Gibson if ((spapr->resize_hpt != SPAPR_RESIZE_HPT_DISABLED) && resize_hpt_err) { 268530f4b05bSDavid Gibson /* 268630f4b05bSDavid Gibson * User requested HPT resize, but this host can't supply it. Bail out 268730f4b05bSDavid Gibson */ 268830f4b05bSDavid Gibson error_report_err(resize_hpt_err); 268930f4b05bSDavid Gibson exit(1); 269030f4b05bSDavid Gibson } 269114963c34SMarkus Armbruster error_free(resize_hpt_err); 269230f4b05bSDavid Gibson 2693425f0b7aSDavid Gibson spapr->rma_size = spapr_rma_size(spapr, &error_fatal); 2694c4177479SAlexey Kardashevskiy 2695b7d1f77aSBenjamin Herrenschmidt /* Setup a load limit for the ramdisk leaving room for SLOF and FDT */ 2696b7d1f77aSBenjamin Herrenschmidt load_limit = MIN(spapr->rma_size, RTAS_MAX_ADDR) - FW_OVERHEAD; 269753018216SPaolo Bonzini 2698482969d6SCédric Le Goater /* 2699482969d6SCédric Le Goater * VSMT must be set in order to be able to compute VCPU ids, ie to 27001a518e76SCédric Le Goater * call spapr_max_server_number() or spapr_vcpu_id(). 2701482969d6SCédric Le Goater */ 2702482969d6SCédric Le Goater spapr_set_vsmt_mode(spapr, &error_fatal); 2703482969d6SCédric Le Goater 27047b565160SDavid Gibson /* Set up Interrupt Controller before we create the VCPUs */ 2705fab397d8SCédric Le Goater spapr_irq_init(spapr, &error_fatal); 27067b565160SDavid Gibson 2707dc1b5eeeSGreg Kurz /* Set up containers for ibm,client-architecture-support negotiated options 2708dc1b5eeeSGreg Kurz */ 2709facdb8b6SMichael Roth spapr->ov5 = spapr_ovec_new(); 2710facdb8b6SMichael Roth spapr->ov5_cas = spapr_ovec_new(); 2711facdb8b6SMichael Roth 2712224245bfSDavid Gibson if (smc->dr_lmb_enabled) { 2713facdb8b6SMichael Roth spapr_ovec_set(spapr->ov5, OV5_DRCONF_MEMORY); 27147c150d6fSDavid Gibson spapr_validate_node_memory(machine, &error_fatal); 2715224245bfSDavid Gibson } 2716224245bfSDavid Gibson 2717417ece33SMichael Roth spapr_ovec_set(spapr->ov5, OV5_FORM1_AFFINITY); 2718417ece33SMichael Roth 2719ffbb1705SMichael Roth /* advertise support for dedicated HP event source to guests */ 2720ffbb1705SMichael Roth if (spapr->use_hotplug_event_source) { 2721ffbb1705SMichael Roth spapr_ovec_set(spapr->ov5, OV5_HP_EVT); 2722ffbb1705SMichael Roth } 2723ffbb1705SMichael Roth 27242772cf6bSDavid Gibson /* advertise support for HPT resizing */ 27252772cf6bSDavid Gibson if (spapr->resize_hpt != SPAPR_RESIZE_HPT_DISABLED) { 27262772cf6bSDavid Gibson spapr_ovec_set(spapr->ov5, OV5_HPT_RESIZE); 27272772cf6bSDavid Gibson } 27282772cf6bSDavid Gibson 2729a324d6f1SBharata B Rao /* advertise support for ibm,dyamic-memory-v2 */ 2730a324d6f1SBharata B Rao spapr_ovec_set(spapr->ov5, OV5_DRMEM_V2); 2731a324d6f1SBharata B Rao 2732db592b5bSCédric Le Goater /* advertise XIVE on POWER9 machines */ 2733ca62823bSDavid Gibson if (spapr->irq->xive) { 2734db592b5bSCédric Le Goater spapr_ovec_set(spapr->ov5, OV5_XIVE_EXPLOIT); 2735db592b5bSCédric Le Goater } 2736db592b5bSCédric Le Goater 273753018216SPaolo Bonzini /* init CPUs */ 27380c86d0fdSDavid Gibson spapr_init_cpus(spapr); 273953018216SPaolo Bonzini 274058c46efaSLaurent Vivier /* 274158c46efaSLaurent Vivier * check we don't have a memory-less/cpu-less NUMA node 274258c46efaSLaurent Vivier * Firmware relies on the existing memory/cpu topology to provide the 274358c46efaSLaurent Vivier * NUMA topology to the kernel. 274458c46efaSLaurent Vivier * And the linux kernel needs to know the NUMA topology at start 274558c46efaSLaurent Vivier * to be able to hotplug CPUs later. 274658c46efaSLaurent Vivier */ 274758c46efaSLaurent Vivier if (machine->numa_state->num_nodes) { 274858c46efaSLaurent Vivier for (i = 0; i < machine->numa_state->num_nodes; ++i) { 274958c46efaSLaurent Vivier /* check for memory-less node */ 275058c46efaSLaurent Vivier if (machine->numa_state->nodes[i].node_mem == 0) { 275158c46efaSLaurent Vivier CPUState *cs; 275258c46efaSLaurent Vivier int found = 0; 275358c46efaSLaurent Vivier /* check for cpu-less node */ 275458c46efaSLaurent Vivier CPU_FOREACH(cs) { 275558c46efaSLaurent Vivier PowerPCCPU *cpu = POWERPC_CPU(cs); 275658c46efaSLaurent Vivier if (cpu->node_id == i) { 275758c46efaSLaurent Vivier found = 1; 275858c46efaSLaurent Vivier break; 275958c46efaSLaurent Vivier } 276058c46efaSLaurent Vivier } 276158c46efaSLaurent Vivier /* memory-less and cpu-less node */ 276258c46efaSLaurent Vivier if (!found) { 276358c46efaSLaurent Vivier error_report( 276458c46efaSLaurent Vivier "Memory-less/cpu-less nodes are not supported (node %d)", 276558c46efaSLaurent Vivier i); 276658c46efaSLaurent Vivier exit(1); 276758c46efaSLaurent Vivier } 276858c46efaSLaurent Vivier } 276958c46efaSLaurent Vivier } 277058c46efaSLaurent Vivier 277158c46efaSLaurent Vivier } 277258c46efaSLaurent Vivier 2773*66407069SDaniel Henrique Barboza spapr->gpu_numa_id = spapr_numa_initial_nvgpu_numa_id(machine); 2774db5127b2SDavid Gibson 2775f1aa45ffSDaniel Henrique Barboza /* Init numa_assoc_array */ 2776f1aa45ffSDaniel Henrique Barboza spapr_numa_associativity_init(spapr, machine); 2777f1aa45ffSDaniel Henrique Barboza 27780550b120SGreg Kurz if ((!kvm_enabled() || kvmppc_has_cap_mmu_radix()) && 2779ad99d04cSDavid Gibson ppc_type_check_compat(machine->cpu_type, CPU_POWERPC_LOGICAL_3_00, 0, 27800550b120SGreg Kurz spapr->max_compat_pvr)) { 2781b4b83312SGreg Kurz spapr_ovec_set(spapr->ov5, OV5_MMU_RADIX_300); 27820550b120SGreg Kurz /* KVM and TCG always allow GTSE with radix... */ 27830550b120SGreg Kurz spapr_ovec_set(spapr->ov5, OV5_MMU_RADIX_GTSE); 27840550b120SGreg Kurz } 27850550b120SGreg Kurz /* ... but not with hash (currently). */ 27860550b120SGreg Kurz 2787026bfd89SDavid Gibson if (kvm_enabled()) { 2788026bfd89SDavid Gibson /* Enable H_LOGICAL_CI_* so SLOF can talk to in-kernel devices */ 2789026bfd89SDavid Gibson kvmppc_enable_logical_ci_hcalls(); 2790ef9971ddSAlexey Kardashevskiy kvmppc_enable_set_mode_hcall(); 27915145ad4fSNathan Whitehorn 27925145ad4fSNathan Whitehorn /* H_CLEAR_MOD/_REF are mandatory in PAPR, but off by default */ 27935145ad4fSNathan Whitehorn kvmppc_enable_clear_ref_mod_hcalls(); 279468f9f708SSuraj Jitindar Singh 279568f9f708SSuraj Jitindar Singh /* Enable H_PAGE_INIT */ 279668f9f708SSuraj Jitindar Singh kvmppc_enable_h_page_init(); 2797026bfd89SDavid Gibson } 2798026bfd89SDavid Gibson 2799ab74e543SIgor Mammedov /* map RAM */ 2800ab74e543SIgor Mammedov memory_region_add_subregion(sysmem, 0, machine->ram); 280153018216SPaolo Bonzini 2802b0c14ec4SDavid Hildenbrand /* always allocate the device memory information */ 2803b0c14ec4SDavid Hildenbrand machine->device_memory = g_malloc0(sizeof(*machine->device_memory)); 2804b0c14ec4SDavid Hildenbrand 28054a1c9cf0SBharata B Rao /* initialize hotplug memory address space */ 28064a1c9cf0SBharata B Rao if (machine->ram_size < machine->maxram_size) { 28070c9269a5SDavid Hildenbrand ram_addr_t device_mem_size = machine->maxram_size - machine->ram_size; 280871c9a3ddSBharata B Rao /* 280971c9a3ddSBharata B Rao * Limit the number of hotpluggable memory slots to half the number 281071c9a3ddSBharata B Rao * slots that KVM supports, leaving the other half for PCI and other 281171c9a3ddSBharata B Rao * devices. However ensure that number of slots doesn't drop below 32. 281271c9a3ddSBharata B Rao */ 281371c9a3ddSBharata B Rao int max_memslots = kvm_enabled() ? kvm_get_max_memslots() / 2 : 281471c9a3ddSBharata B Rao SPAPR_MAX_RAM_SLOTS; 28154a1c9cf0SBharata B Rao 281671c9a3ddSBharata B Rao if (max_memslots < SPAPR_MAX_RAM_SLOTS) { 281771c9a3ddSBharata B Rao max_memslots = SPAPR_MAX_RAM_SLOTS; 281871c9a3ddSBharata B Rao } 281971c9a3ddSBharata B Rao if (machine->ram_slots > max_memslots) { 2820d54e4d76SDavid Gibson error_report("Specified number of memory slots %" 2821d54e4d76SDavid Gibson PRIu64" exceeds max supported %d", 282271c9a3ddSBharata B Rao machine->ram_slots, max_memslots); 2823d54e4d76SDavid Gibson exit(1); 28244a1c9cf0SBharata B Rao } 28254a1c9cf0SBharata B Rao 2826b0c14ec4SDavid Hildenbrand machine->device_memory->base = ROUND_UP(machine->ram_size, 28270c9269a5SDavid Hildenbrand SPAPR_DEVICE_MEM_ALIGN); 2828b0c14ec4SDavid Hildenbrand memory_region_init(&machine->device_memory->mr, OBJECT(spapr), 28290c9269a5SDavid Hildenbrand "device-memory", device_mem_size); 2830b0c14ec4SDavid Hildenbrand memory_region_add_subregion(sysmem, machine->device_memory->base, 2831b0c14ec4SDavid Hildenbrand &machine->device_memory->mr); 28324a1c9cf0SBharata B Rao } 28334a1c9cf0SBharata B Rao 2834224245bfSDavid Gibson if (smc->dr_lmb_enabled) { 2835224245bfSDavid Gibson spapr_create_lmb_dr_connectors(spapr); 2836224245bfSDavid Gibson } 2837224245bfSDavid Gibson 28388af7e1feSNicholas Piggin if (spapr_get_cap(spapr, SPAPR_CAP_FWNMI) == SPAPR_CAP_ON) { 28392500fb42SAravinda Prasad /* Create the error string for live migration blocker */ 28402500fb42SAravinda Prasad error_setg(&spapr->fwnmi_migration_blocker, 28412500fb42SAravinda Prasad "A machine check is being handled during migration. The handler" 28422500fb42SAravinda Prasad "may run and log hardware error on the destination"); 28432500fb42SAravinda Prasad } 28442500fb42SAravinda Prasad 2845ee3a71e3SShivaprasad G Bhat if (mc->nvdimm_supported) { 2846ee3a71e3SShivaprasad G Bhat spapr_create_nvdimm_dr_connectors(spapr); 2847ee3a71e3SShivaprasad G Bhat } 2848ee3a71e3SShivaprasad G Bhat 2849ffbb1705SMichael Roth /* Set up RTAS event infrastructure */ 285053018216SPaolo Bonzini spapr_events_init(spapr); 285153018216SPaolo Bonzini 285212f42174SDavid Gibson /* Set up the RTC RTAS interfaces */ 285328df36a1SDavid Gibson spapr_rtc_create(spapr); 285412f42174SDavid Gibson 285553018216SPaolo Bonzini /* Set up VIO bus */ 285653018216SPaolo Bonzini spapr->vio_bus = spapr_vio_bus_init(); 285753018216SPaolo Bonzini 285846ee119fSPaolo Bonzini for (i = 0; serial_hd(i); i++) { 28599bca0edbSPeter Maydell spapr_vty_create(spapr->vio_bus, serial_hd(i)); 286053018216SPaolo Bonzini } 286153018216SPaolo Bonzini 286253018216SPaolo Bonzini /* We always have at least the nvram device on VIO */ 286353018216SPaolo Bonzini spapr_create_nvram(spapr); 286453018216SPaolo Bonzini 2865962b6c36SMichael Roth /* 2866962b6c36SMichael Roth * Setup hotplug / dynamic-reconfiguration connectors. top-level 2867962b6c36SMichael Roth * connectors (described in root DT node's "ibm,drc-types" property) 2868962b6c36SMichael Roth * are pre-initialized here. additional child connectors (such as 2869962b6c36SMichael Roth * connectors for a PHBs PCI slots) are added as needed during their 2870962b6c36SMichael Roth * parent's realization. 2871962b6c36SMichael Roth */ 2872962b6c36SMichael Roth if (smc->dr_phb_enabled) { 2873962b6c36SMichael Roth for (i = 0; i < SPAPR_MAX_PHBS; i++) { 2874962b6c36SMichael Roth spapr_dr_connector_new(OBJECT(machine), TYPE_SPAPR_DRC_PHB, i); 2875962b6c36SMichael Roth } 2876962b6c36SMichael Roth } 2877962b6c36SMichael Roth 287853018216SPaolo Bonzini /* Set up PCI */ 287953018216SPaolo Bonzini spapr_pci_rtas_init(); 288053018216SPaolo Bonzini 2881999c9cafSGreg Kurz phb = spapr_create_default_phb(); 288253018216SPaolo Bonzini 288353018216SPaolo Bonzini for (i = 0; i < nb_nics; i++) { 288453018216SPaolo Bonzini NICInfo *nd = &nd_table[i]; 288553018216SPaolo Bonzini 288653018216SPaolo Bonzini if (!nd->model) { 28873c3a4e7aSThomas Huth nd->model = g_strdup("spapr-vlan"); 288853018216SPaolo Bonzini } 288953018216SPaolo Bonzini 28903c3a4e7aSThomas Huth if (g_str_equal(nd->model, "spapr-vlan") || 28913c3a4e7aSThomas Huth g_str_equal(nd->model, "ibmveth")) { 289253018216SPaolo Bonzini spapr_vlan_create(spapr->vio_bus, nd); 289353018216SPaolo Bonzini } else { 289429b358f9SDavid Gibson pci_nic_init_nofail(&nd_table[i], phb->bus, nd->model, NULL); 289553018216SPaolo Bonzini } 289653018216SPaolo Bonzini } 289753018216SPaolo Bonzini 289853018216SPaolo Bonzini for (i = 0; i <= drive_get_max_bus(IF_SCSI); i++) { 289953018216SPaolo Bonzini spapr_vscsi_create(spapr->vio_bus); 290053018216SPaolo Bonzini } 290153018216SPaolo Bonzini 290253018216SPaolo Bonzini /* Graphics */ 290314c6a894SDavid Gibson if (spapr_vga_init(phb->bus, &error_fatal)) { 290453018216SPaolo Bonzini spapr->has_graphics = true; 2905c6e76503SPaolo Bonzini machine->usb |= defaults_enabled() && !machine->usb_disabled; 290653018216SPaolo Bonzini } 290753018216SPaolo Bonzini 29084ee9ced9SMarcel Apfelbaum if (machine->usb) { 290957040d45SThomas Huth if (smc->use_ohci_by_default) { 291053018216SPaolo Bonzini pci_create_simple(phb->bus, -1, "pci-ohci"); 291157040d45SThomas Huth } else { 291257040d45SThomas Huth pci_create_simple(phb->bus, -1, "nec-usb-xhci"); 291357040d45SThomas Huth } 2914c86580b8SMarkus Armbruster 291553018216SPaolo Bonzini if (spapr->has_graphics) { 2916c86580b8SMarkus Armbruster USBBus *usb_bus = usb_bus_find(-1); 2917c86580b8SMarkus Armbruster 2918c86580b8SMarkus Armbruster usb_create_simple(usb_bus, "usb-kbd"); 2919c86580b8SMarkus Armbruster usb_create_simple(usb_bus, "usb-mouse"); 292053018216SPaolo Bonzini } 292153018216SPaolo Bonzini } 292253018216SPaolo Bonzini 292353018216SPaolo Bonzini if (kernel_filename) { 29244366e1dbSLiam Merwick spapr->kernel_size = load_elf(kernel_filename, NULL, 292587262806SAlexey Kardashevskiy translate_kernel_address, spapr, 2926617160c9SBALATON Zoltan NULL, NULL, NULL, NULL, 1, 2927a19f7fb0SDavid Gibson PPC_ELF_MACHINE, 0, 0); 2928a19f7fb0SDavid Gibson if (spapr->kernel_size == ELF_LOAD_WRONG_ENDIAN) { 29294366e1dbSLiam Merwick spapr->kernel_size = load_elf(kernel_filename, NULL, 2930617160c9SBALATON Zoltan translate_kernel_address, spapr, 2931617160c9SBALATON Zoltan NULL, NULL, NULL, NULL, 0, 2932617160c9SBALATON Zoltan PPC_ELF_MACHINE, 0, 0); 2933a19f7fb0SDavid Gibson spapr->kernel_le = spapr->kernel_size > 0; 293416457e7fSBenjamin Herrenschmidt } 2935a19f7fb0SDavid Gibson if (spapr->kernel_size < 0) { 2936a19f7fb0SDavid Gibson error_report("error loading %s: %s", kernel_filename, 2937a19f7fb0SDavid Gibson load_elf_strerror(spapr->kernel_size)); 293853018216SPaolo Bonzini exit(1); 293953018216SPaolo Bonzini } 294053018216SPaolo Bonzini 294153018216SPaolo Bonzini /* load initrd */ 294253018216SPaolo Bonzini if (initrd_filename) { 294353018216SPaolo Bonzini /* Try to locate the initrd in the gap between the kernel 294453018216SPaolo Bonzini * and the firmware. Add a bit of space just in case 294553018216SPaolo Bonzini */ 294687262806SAlexey Kardashevskiy spapr->initrd_base = (spapr->kernel_addr + spapr->kernel_size 2947a19f7fb0SDavid Gibson + 0x1ffff) & ~0xffff; 2948a19f7fb0SDavid Gibson spapr->initrd_size = load_image_targphys(initrd_filename, 2949a19f7fb0SDavid Gibson spapr->initrd_base, 2950a19f7fb0SDavid Gibson load_limit 2951a19f7fb0SDavid Gibson - spapr->initrd_base); 2952a19f7fb0SDavid Gibson if (spapr->initrd_size < 0) { 2953d54e4d76SDavid Gibson error_report("could not load initial ram disk '%s'", 295453018216SPaolo Bonzini initrd_filename); 295553018216SPaolo Bonzini exit(1); 295653018216SPaolo Bonzini } 295753018216SPaolo Bonzini } 295853018216SPaolo Bonzini } 295953018216SPaolo Bonzini 29608e7ea787SAndreas Färber filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); 29614c56440dSStefan Weil if (!filename) { 296268fea5a0SThomas Huth error_report("Could not find LPAR firmware '%s'", bios_name); 29634c56440dSStefan Weil exit(1); 29644c56440dSStefan Weil } 296553018216SPaolo Bonzini fw_size = load_image_targphys(filename, 0, FW_MAX_SIZE); 296668fea5a0SThomas Huth if (fw_size <= 0) { 296768fea5a0SThomas Huth error_report("Could not load LPAR firmware '%s'", filename); 296853018216SPaolo Bonzini exit(1); 296953018216SPaolo Bonzini } 297053018216SPaolo Bonzini g_free(filename); 297153018216SPaolo Bonzini 297228e02042SDavid Gibson /* FIXME: Should register things through the MachineState's qdev 297328e02042SDavid Gibson * interface, this is a legacy from the sPAPREnvironment structure 297428e02042SDavid Gibson * which predated MachineState but had a similar function */ 29754be21d56SDavid Gibson vmstate_register(NULL, 0, &vmstate_spapr, spapr); 29761df2c9a2SPeter Xu register_savevm_live("spapr/htab", VMSTATE_INSTANCE_ID_ANY, 1, 29774be21d56SDavid Gibson &savevm_htab_handlers, spapr); 29784be21d56SDavid Gibson 29799bc6bfdfSMarkus Armbruster qbus_set_hotplug_handler(sysbus_get_default(), OBJECT(machine)); 2980bb2bdd81SGreg Kurz 29815b2128d2SAlexander Graf qemu_register_boot_set(spapr_boot_set, spapr); 298242043e4fSLaurent Vivier 298393eac7b8SNicholas Piggin /* 298493eac7b8SNicholas Piggin * Nothing needs to be done to resume a suspended guest because 298593eac7b8SNicholas Piggin * suspending does not change the machine state, so no need for 298693eac7b8SNicholas Piggin * a ->wakeup method. 298793eac7b8SNicholas Piggin */ 298893eac7b8SNicholas Piggin qemu_register_wakeup_support(); 298993eac7b8SNicholas Piggin 299042043e4fSLaurent Vivier if (kvm_enabled()) { 29913dc410aeSAlexey Kardashevskiy /* to stop and start vmclock */ 299242043e4fSLaurent Vivier qemu_add_vm_change_state_handler(cpu_ppc_clock_vm_state_change, 299342043e4fSLaurent Vivier &spapr->tb); 29943dc410aeSAlexey Kardashevskiy 29953dc410aeSAlexey Kardashevskiy kvmppc_spapr_enable_inkernel_multitce(); 299642043e4fSLaurent Vivier } 29979ac703acSAravinda Prasad 29988af7e1feSNicholas Piggin qemu_cond_init(&spapr->fwnmi_machine_check_interlock_cond); 299953018216SPaolo Bonzini } 300053018216SPaolo Bonzini 300107b10bc4SDaniel Henrique Barboza #define DEFAULT_KVM_TYPE "auto" 3002dc0ca80eSEric Auger static int spapr_kvm_type(MachineState *machine, const char *vm_type) 3003135a129aSAneesh Kumar K.V { 300407b10bc4SDaniel Henrique Barboza /* 300507b10bc4SDaniel Henrique Barboza * The use of g_ascii_strcasecmp() for 'hv' and 'pr' is to 300607b10bc4SDaniel Henrique Barboza * accomodate the 'HV' and 'PV' formats that exists in the 300707b10bc4SDaniel Henrique Barboza * wild. The 'auto' mode is being introduced already as 300807b10bc4SDaniel Henrique Barboza * lower-case, thus we don't need to bother checking for 300907b10bc4SDaniel Henrique Barboza * "AUTO". 301007b10bc4SDaniel Henrique Barboza */ 301107b10bc4SDaniel Henrique Barboza if (!vm_type || !strcmp(vm_type, DEFAULT_KVM_TYPE)) { 3012135a129aSAneesh Kumar K.V return 0; 3013135a129aSAneesh Kumar K.V } 3014135a129aSAneesh Kumar K.V 301507b10bc4SDaniel Henrique Barboza if (!g_ascii_strcasecmp(vm_type, "hv")) { 3016135a129aSAneesh Kumar K.V return 1; 3017135a129aSAneesh Kumar K.V } 3018135a129aSAneesh Kumar K.V 301907b10bc4SDaniel Henrique Barboza if (!g_ascii_strcasecmp(vm_type, "pr")) { 3020135a129aSAneesh Kumar K.V return 2; 3021135a129aSAneesh Kumar K.V } 3022135a129aSAneesh Kumar K.V 3023135a129aSAneesh Kumar K.V error_report("Unknown kvm-type specified '%s'", vm_type); 3024135a129aSAneesh Kumar K.V exit(1); 3025135a129aSAneesh Kumar K.V } 3026135a129aSAneesh Kumar K.V 302771461b0fSAlexey Kardashevskiy /* 3028627b84f4SGonglei * Implementation of an interface to adjust firmware path 302971461b0fSAlexey Kardashevskiy * for the bootindex property handling. 303071461b0fSAlexey Kardashevskiy */ 303171461b0fSAlexey Kardashevskiy static char *spapr_get_fw_dev_path(FWPathProvider *p, BusState *bus, 303271461b0fSAlexey Kardashevskiy DeviceState *dev) 303371461b0fSAlexey Kardashevskiy { 303471461b0fSAlexey Kardashevskiy #define CAST(type, obj, name) \ 303571461b0fSAlexey Kardashevskiy ((type *)object_dynamic_cast(OBJECT(obj), (name))) 303671461b0fSAlexey Kardashevskiy SCSIDevice *d = CAST(SCSIDevice, dev, TYPE_SCSI_DEVICE); 3037ce2918cbSDavid Gibson SpaprPhbState *phb = CAST(SpaprPhbState, dev, TYPE_SPAPR_PCI_HOST_BRIDGE); 3038c4e13492SFelipe Franciosi VHostSCSICommon *vsc = CAST(VHostSCSICommon, dev, TYPE_VHOST_SCSI_COMMON); 3039040bdafcSGreg Kurz PCIDevice *pcidev = CAST(PCIDevice, dev, TYPE_PCI_DEVICE); 304071461b0fSAlexey Kardashevskiy 304171461b0fSAlexey Kardashevskiy if (d) { 304271461b0fSAlexey Kardashevskiy void *spapr = CAST(void, bus->parent, "spapr-vscsi"); 304371461b0fSAlexey Kardashevskiy VirtIOSCSI *virtio = CAST(VirtIOSCSI, bus->parent, TYPE_VIRTIO_SCSI); 304471461b0fSAlexey Kardashevskiy USBDevice *usb = CAST(USBDevice, bus->parent, TYPE_USB_DEVICE); 304571461b0fSAlexey Kardashevskiy 304671461b0fSAlexey Kardashevskiy if (spapr) { 304771461b0fSAlexey Kardashevskiy /* 304871461b0fSAlexey Kardashevskiy * Replace "channel@0/disk@0,0" with "disk@8000000000000000": 30491ac24c91SThomas Huth * In the top 16 bits of the 64-bit LUN, we use SRP luns of the form 30501ac24c91SThomas Huth * 0x8000 | (target << 8) | (bus << 5) | lun 30511ac24c91SThomas Huth * (see the "Logical unit addressing format" table in SAM5) 305271461b0fSAlexey Kardashevskiy */ 30531ac24c91SThomas Huth unsigned id = 0x8000 | (d->id << 8) | (d->channel << 5) | d->lun; 305471461b0fSAlexey Kardashevskiy return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev), 305571461b0fSAlexey Kardashevskiy (uint64_t)id << 48); 305671461b0fSAlexey Kardashevskiy } else if (virtio) { 305771461b0fSAlexey Kardashevskiy /* 305871461b0fSAlexey Kardashevskiy * We use SRP luns of the form 01000000 | (target << 8) | lun 305971461b0fSAlexey Kardashevskiy * in the top 32 bits of the 64-bit LUN 306071461b0fSAlexey Kardashevskiy * Note: the quote above is from SLOF and it is wrong, 306171461b0fSAlexey Kardashevskiy * the actual binding is: 306271461b0fSAlexey Kardashevskiy * swap 0100 or 10 << or 20 << ( target lun-id -- srplun ) 306371461b0fSAlexey Kardashevskiy */ 306471461b0fSAlexey Kardashevskiy unsigned id = 0x1000000 | (d->id << 16) | d->lun; 3065bac658d1SThomas Huth if (d->lun >= 256) { 3066bac658d1SThomas Huth /* Use the LUN "flat space addressing method" */ 3067bac658d1SThomas Huth id |= 0x4000; 3068bac658d1SThomas Huth } 306971461b0fSAlexey Kardashevskiy return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev), 307071461b0fSAlexey Kardashevskiy (uint64_t)id << 32); 307171461b0fSAlexey Kardashevskiy } else if (usb) { 307271461b0fSAlexey Kardashevskiy /* 307371461b0fSAlexey Kardashevskiy * We use SRP luns of the form 01000000 | (usb-port << 16) | lun 307471461b0fSAlexey Kardashevskiy * in the top 32 bits of the 64-bit LUN 307571461b0fSAlexey Kardashevskiy */ 307671461b0fSAlexey Kardashevskiy unsigned usb_port = atoi(usb->port->path); 307771461b0fSAlexey Kardashevskiy unsigned id = 0x1000000 | (usb_port << 16) | d->lun; 307871461b0fSAlexey Kardashevskiy return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev), 307971461b0fSAlexey Kardashevskiy (uint64_t)id << 32); 308071461b0fSAlexey Kardashevskiy } 308171461b0fSAlexey Kardashevskiy } 308271461b0fSAlexey Kardashevskiy 3083b99260ebSThomas Huth /* 3084b99260ebSThomas Huth * SLOF probes the USB devices, and if it recognizes that the device is a 3085b99260ebSThomas Huth * storage device, it changes its name to "storage" instead of "usb-host", 3086b99260ebSThomas Huth * and additionally adds a child node for the SCSI LUN, so the correct 3087b99260ebSThomas Huth * boot path in SLOF is something like .../storage@1/disk@xxx" instead. 3088b99260ebSThomas Huth */ 3089b99260ebSThomas Huth if (strcmp("usb-host", qdev_fw_name(dev)) == 0) { 3090b99260ebSThomas Huth USBDevice *usbdev = CAST(USBDevice, dev, TYPE_USB_DEVICE); 3091b99260ebSThomas Huth if (usb_host_dev_is_scsi_storage(usbdev)) { 3092b99260ebSThomas Huth return g_strdup_printf("storage@%s/disk", usbdev->port->path); 3093b99260ebSThomas Huth } 3094b99260ebSThomas Huth } 3095b99260ebSThomas Huth 309671461b0fSAlexey Kardashevskiy if (phb) { 309771461b0fSAlexey Kardashevskiy /* Replace "pci" with "pci@800000020000000" */ 309871461b0fSAlexey Kardashevskiy return g_strdup_printf("pci@%"PRIX64, phb->buid); 309971461b0fSAlexey Kardashevskiy } 310071461b0fSAlexey Kardashevskiy 3101c4e13492SFelipe Franciosi if (vsc) { 3102c4e13492SFelipe Franciosi /* Same logic as virtio above */ 3103c4e13492SFelipe Franciosi unsigned id = 0x1000000 | (vsc->target << 16) | vsc->lun; 3104c4e13492SFelipe Franciosi return g_strdup_printf("disk@%"PRIX64, (uint64_t)id << 32); 3105c4e13492SFelipe Franciosi } 3106c4e13492SFelipe Franciosi 31074871dd4cSThomas Huth if (g_str_equal("pci-bridge", qdev_fw_name(dev))) { 31084871dd4cSThomas Huth /* SLOF uses "pci" instead of "pci-bridge" for PCI bridges */ 31094871dd4cSThomas Huth PCIDevice *pcidev = CAST(PCIDevice, dev, TYPE_PCI_DEVICE); 31104871dd4cSThomas Huth return g_strdup_printf("pci@%x", PCI_SLOT(pcidev->devfn)); 31114871dd4cSThomas Huth } 31124871dd4cSThomas Huth 3113040bdafcSGreg Kurz if (pcidev) { 3114040bdafcSGreg Kurz return spapr_pci_fw_dev_name(pcidev); 3115040bdafcSGreg Kurz } 3116040bdafcSGreg Kurz 311771461b0fSAlexey Kardashevskiy return NULL; 311871461b0fSAlexey Kardashevskiy } 311971461b0fSAlexey Kardashevskiy 312023825581SEduardo Habkost static char *spapr_get_kvm_type(Object *obj, Error **errp) 312123825581SEduardo Habkost { 3122ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(obj); 312323825581SEduardo Habkost 312428e02042SDavid Gibson return g_strdup(spapr->kvm_type); 312523825581SEduardo Habkost } 312623825581SEduardo Habkost 312723825581SEduardo Habkost static void spapr_set_kvm_type(Object *obj, const char *value, Error **errp) 312823825581SEduardo Habkost { 3129ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(obj); 313023825581SEduardo Habkost 313128e02042SDavid Gibson g_free(spapr->kvm_type); 313228e02042SDavid Gibson spapr->kvm_type = g_strdup(value); 313323825581SEduardo Habkost } 313423825581SEduardo Habkost 3135f6229214SMichael Roth static bool spapr_get_modern_hotplug_events(Object *obj, Error **errp) 3136f6229214SMichael Roth { 3137ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(obj); 3138f6229214SMichael Roth 3139f6229214SMichael Roth return spapr->use_hotplug_event_source; 3140f6229214SMichael Roth } 3141f6229214SMichael Roth 3142f6229214SMichael Roth static void spapr_set_modern_hotplug_events(Object *obj, bool value, 3143f6229214SMichael Roth Error **errp) 3144f6229214SMichael Roth { 3145ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(obj); 3146f6229214SMichael Roth 3147f6229214SMichael Roth spapr->use_hotplug_event_source = value; 3148f6229214SMichael Roth } 3149f6229214SMichael Roth 3150fcad0d21SAlexey Kardashevskiy static bool spapr_get_msix_emulation(Object *obj, Error **errp) 3151fcad0d21SAlexey Kardashevskiy { 3152fcad0d21SAlexey Kardashevskiy return true; 3153fcad0d21SAlexey Kardashevskiy } 3154fcad0d21SAlexey Kardashevskiy 315530f4b05bSDavid Gibson static char *spapr_get_resize_hpt(Object *obj, Error **errp) 315630f4b05bSDavid Gibson { 3157ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(obj); 315830f4b05bSDavid Gibson 315930f4b05bSDavid Gibson switch (spapr->resize_hpt) { 316030f4b05bSDavid Gibson case SPAPR_RESIZE_HPT_DEFAULT: 316130f4b05bSDavid Gibson return g_strdup("default"); 316230f4b05bSDavid Gibson case SPAPR_RESIZE_HPT_DISABLED: 316330f4b05bSDavid Gibson return g_strdup("disabled"); 316430f4b05bSDavid Gibson case SPAPR_RESIZE_HPT_ENABLED: 316530f4b05bSDavid Gibson return g_strdup("enabled"); 316630f4b05bSDavid Gibson case SPAPR_RESIZE_HPT_REQUIRED: 316730f4b05bSDavid Gibson return g_strdup("required"); 316830f4b05bSDavid Gibson } 316930f4b05bSDavid Gibson g_assert_not_reached(); 317030f4b05bSDavid Gibson } 317130f4b05bSDavid Gibson 317230f4b05bSDavid Gibson static void spapr_set_resize_hpt(Object *obj, const char *value, Error **errp) 317330f4b05bSDavid Gibson { 3174ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(obj); 317530f4b05bSDavid Gibson 317630f4b05bSDavid Gibson if (strcmp(value, "default") == 0) { 317730f4b05bSDavid Gibson spapr->resize_hpt = SPAPR_RESIZE_HPT_DEFAULT; 317830f4b05bSDavid Gibson } else if (strcmp(value, "disabled") == 0) { 317930f4b05bSDavid Gibson spapr->resize_hpt = SPAPR_RESIZE_HPT_DISABLED; 318030f4b05bSDavid Gibson } else if (strcmp(value, "enabled") == 0) { 318130f4b05bSDavid Gibson spapr->resize_hpt = SPAPR_RESIZE_HPT_ENABLED; 318230f4b05bSDavid Gibson } else if (strcmp(value, "required") == 0) { 318330f4b05bSDavid Gibson spapr->resize_hpt = SPAPR_RESIZE_HPT_REQUIRED; 318430f4b05bSDavid Gibson } else { 318530f4b05bSDavid Gibson error_setg(errp, "Bad value for \"resize-hpt\" property"); 318630f4b05bSDavid Gibson } 318730f4b05bSDavid Gibson } 318830f4b05bSDavid Gibson 31893ba3d0bcSCédric Le Goater static char *spapr_get_ic_mode(Object *obj, Error **errp) 31903ba3d0bcSCédric Le Goater { 3191ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(obj); 31923ba3d0bcSCédric Le Goater 31933ba3d0bcSCédric Le Goater if (spapr->irq == &spapr_irq_xics_legacy) { 31943ba3d0bcSCédric Le Goater return g_strdup("legacy"); 31953ba3d0bcSCédric Le Goater } else if (spapr->irq == &spapr_irq_xics) { 31963ba3d0bcSCédric Le Goater return g_strdup("xics"); 31973ba3d0bcSCédric Le Goater } else if (spapr->irq == &spapr_irq_xive) { 31983ba3d0bcSCédric Le Goater return g_strdup("xive"); 319913db0cd9SCédric Le Goater } else if (spapr->irq == &spapr_irq_dual) { 320013db0cd9SCédric Le Goater return g_strdup("dual"); 32013ba3d0bcSCédric Le Goater } 32023ba3d0bcSCédric Le Goater g_assert_not_reached(); 32033ba3d0bcSCédric Le Goater } 32043ba3d0bcSCédric Le Goater 32053ba3d0bcSCédric Le Goater static void spapr_set_ic_mode(Object *obj, const char *value, Error **errp) 32063ba3d0bcSCédric Le Goater { 3207ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(obj); 32083ba3d0bcSCédric Le Goater 320921df5e4fSGreg Kurz if (SPAPR_MACHINE_GET_CLASS(spapr)->legacy_irq_allocation) { 321021df5e4fSGreg Kurz error_setg(errp, "This machine only uses the legacy XICS backend, don't pass ic-mode"); 321121df5e4fSGreg Kurz return; 321221df5e4fSGreg Kurz } 321321df5e4fSGreg Kurz 32143ba3d0bcSCédric Le Goater /* The legacy IRQ backend can not be set */ 32153ba3d0bcSCédric Le Goater if (strcmp(value, "xics") == 0) { 32163ba3d0bcSCédric Le Goater spapr->irq = &spapr_irq_xics; 32173ba3d0bcSCédric Le Goater } else if (strcmp(value, "xive") == 0) { 32183ba3d0bcSCédric Le Goater spapr->irq = &spapr_irq_xive; 321913db0cd9SCédric Le Goater } else if (strcmp(value, "dual") == 0) { 322013db0cd9SCédric Le Goater spapr->irq = &spapr_irq_dual; 32213ba3d0bcSCédric Le Goater } else { 32223ba3d0bcSCédric Le Goater error_setg(errp, "Bad value for \"ic-mode\" property"); 32233ba3d0bcSCédric Le Goater } 32243ba3d0bcSCédric Le Goater } 32253ba3d0bcSCédric Le Goater 322627461d69SPrasad J Pandit static char *spapr_get_host_model(Object *obj, Error **errp) 322727461d69SPrasad J Pandit { 3228ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(obj); 322927461d69SPrasad J Pandit 323027461d69SPrasad J Pandit return g_strdup(spapr->host_model); 323127461d69SPrasad J Pandit } 323227461d69SPrasad J Pandit 323327461d69SPrasad J Pandit static void spapr_set_host_model(Object *obj, const char *value, Error **errp) 323427461d69SPrasad J Pandit { 3235ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(obj); 323627461d69SPrasad J Pandit 323727461d69SPrasad J Pandit g_free(spapr->host_model); 323827461d69SPrasad J Pandit spapr->host_model = g_strdup(value); 323927461d69SPrasad J Pandit } 324027461d69SPrasad J Pandit 324127461d69SPrasad J Pandit static char *spapr_get_host_serial(Object *obj, Error **errp) 324227461d69SPrasad J Pandit { 3243ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(obj); 324427461d69SPrasad J Pandit 324527461d69SPrasad J Pandit return g_strdup(spapr->host_serial); 324627461d69SPrasad J Pandit } 324727461d69SPrasad J Pandit 324827461d69SPrasad J Pandit static void spapr_set_host_serial(Object *obj, const char *value, Error **errp) 324927461d69SPrasad J Pandit { 3250ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(obj); 325127461d69SPrasad J Pandit 325227461d69SPrasad J Pandit g_free(spapr->host_serial); 325327461d69SPrasad J Pandit spapr->host_serial = g_strdup(value); 325427461d69SPrasad J Pandit } 325527461d69SPrasad J Pandit 3256bcb5ce08SDavid Gibson static void spapr_instance_init(Object *obj) 325723825581SEduardo Habkost { 3258ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(obj); 3259ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); 326055810e90SIgor Mammedov MachineState *ms = MACHINE(spapr); 326155810e90SIgor Mammedov MachineClass *mc = MACHINE_GET_CLASS(ms); 326255810e90SIgor Mammedov 326355810e90SIgor Mammedov /* 326455810e90SIgor Mammedov * NVDIMM support went live in 5.1 without considering that, in 326555810e90SIgor Mammedov * other archs, the user needs to enable NVDIMM support with the 326655810e90SIgor Mammedov * 'nvdimm' machine option and the default behavior is NVDIMM 326755810e90SIgor Mammedov * support disabled. It is too late to roll back to the standard 326855810e90SIgor Mammedov * behavior without breaking 5.1 guests. 326955810e90SIgor Mammedov */ 327055810e90SIgor Mammedov if (mc->nvdimm_supported) { 327155810e90SIgor Mammedov ms->nvdimms_state->is_enabled = true; 327255810e90SIgor Mammedov } 3273715c5407SDavid Gibson 3274715c5407SDavid Gibson spapr->htab_fd = -1; 3275f6229214SMichael Roth spapr->use_hotplug_event_source = true; 327607b10bc4SDaniel Henrique Barboza spapr->kvm_type = g_strdup(DEFAULT_KVM_TYPE); 327723825581SEduardo Habkost object_property_add_str(obj, "kvm-type", 3278d2623129SMarkus Armbruster spapr_get_kvm_type, spapr_set_kvm_type); 327949d2e648SMarcel Apfelbaum object_property_set_description(obj, "kvm-type", 328007b10bc4SDaniel Henrique Barboza "Specifies the KVM virtualization mode (auto," 328107b10bc4SDaniel Henrique Barboza " hv, pr). Defaults to 'auto'. This mode will use" 328207b10bc4SDaniel Henrique Barboza " any available KVM module loaded in the host," 328307b10bc4SDaniel Henrique Barboza " where kvm_hv takes precedence if both kvm_hv and" 328407b10bc4SDaniel Henrique Barboza " kvm_pr are loaded."); 3285f6229214SMichael Roth object_property_add_bool(obj, "modern-hotplug-events", 3286f6229214SMichael Roth spapr_get_modern_hotplug_events, 3287d2623129SMarkus Armbruster spapr_set_modern_hotplug_events); 3288f6229214SMichael Roth object_property_set_description(obj, "modern-hotplug-events", 3289f6229214SMichael Roth "Use dedicated hotplug event mechanism in" 3290f6229214SMichael Roth " place of standard EPOW events when possible" 32917eecec7dSMarkus Armbruster " (required for memory hot-unplug support)"); 32927843c0d6SDavid Gibson ppc_compat_add_property(obj, "max-cpu-compat", &spapr->max_compat_pvr, 329340c2281cSMarkus Armbruster "Maximum permitted CPU compatibility mode"); 329430f4b05bSDavid Gibson 329530f4b05bSDavid Gibson object_property_add_str(obj, "resize-hpt", 3296d2623129SMarkus Armbruster spapr_get_resize_hpt, spapr_set_resize_hpt); 329730f4b05bSDavid Gibson object_property_set_description(obj, "resize-hpt", 32987eecec7dSMarkus Armbruster "Resizing of the Hash Page Table (enabled, disabled, required)"); 329964a7b8deSFelipe Franciosi object_property_add_uint32_ptr(obj, "vsmt", 3300d2623129SMarkus Armbruster &spapr->vsmt, OBJ_PROP_FLAG_READWRITE); 3301fa98fbfcSSam Bobroff object_property_set_description(obj, "vsmt", 3302fa98fbfcSSam Bobroff "Virtual SMT: KVM behaves as if this were" 33037eecec7dSMarkus Armbruster " the host's SMT mode"); 330464a7b8deSFelipe Franciosi 3305fcad0d21SAlexey Kardashevskiy object_property_add_bool(obj, "vfio-no-msix-emulation", 3306d2623129SMarkus Armbruster spapr_get_msix_emulation, NULL); 33073ba3d0bcSCédric Le Goater 330864a7b8deSFelipe Franciosi object_property_add_uint64_ptr(obj, "kernel-addr", 3309d2623129SMarkus Armbruster &spapr->kernel_addr, OBJ_PROP_FLAG_READWRITE); 331087262806SAlexey Kardashevskiy object_property_set_description(obj, "kernel-addr", 331187262806SAlexey Kardashevskiy stringify(KERNEL_LOAD_ADDR) 33127eecec7dSMarkus Armbruster " for -kernel is the default"); 331387262806SAlexey Kardashevskiy spapr->kernel_addr = KERNEL_LOAD_ADDR; 33143ba3d0bcSCédric Le Goater /* The machine class defines the default interrupt controller mode */ 33153ba3d0bcSCédric Le Goater spapr->irq = smc->irq; 33163ba3d0bcSCédric Le Goater object_property_add_str(obj, "ic-mode", spapr_get_ic_mode, 3317d2623129SMarkus Armbruster spapr_set_ic_mode); 33183ba3d0bcSCédric Le Goater object_property_set_description(obj, "ic-mode", 33197eecec7dSMarkus Armbruster "Specifies the interrupt controller mode (xics, xive, dual)"); 332027461d69SPrasad J Pandit 332127461d69SPrasad J Pandit object_property_add_str(obj, "host-model", 3322d2623129SMarkus Armbruster spapr_get_host_model, spapr_set_host_model); 332327461d69SPrasad J Pandit object_property_set_description(obj, "host-model", 33247eecec7dSMarkus Armbruster "Host model to advertise in guest device tree"); 332527461d69SPrasad J Pandit object_property_add_str(obj, "host-serial", 3326d2623129SMarkus Armbruster spapr_get_host_serial, spapr_set_host_serial); 332727461d69SPrasad J Pandit object_property_set_description(obj, "host-serial", 33287eecec7dSMarkus Armbruster "Host serial number to advertise in guest device tree"); 332923825581SEduardo Habkost } 333023825581SEduardo Habkost 333187bbdd9cSDavid Gibson static void spapr_machine_finalizefn(Object *obj) 333287bbdd9cSDavid Gibson { 3333ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(obj); 333487bbdd9cSDavid Gibson 333587bbdd9cSDavid Gibson g_free(spapr->kvm_type); 333687bbdd9cSDavid Gibson } 333787bbdd9cSDavid Gibson 33381c7ad77eSNicholas Piggin void spapr_do_system_reset_on_cpu(CPUState *cs, run_on_cpu_data arg) 333934316482SAlexey Kardashevskiy { 33400e236d34SNicholas Piggin SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine()); 3341b5b7f391SNicholas Piggin PowerPCCPU *cpu = POWERPC_CPU(cs); 3342b5b7f391SNicholas Piggin CPUPPCState *env = &cpu->env; 33430e236d34SNicholas Piggin 334434316482SAlexey Kardashevskiy cpu_synchronize_state(cs); 33450e236d34SNicholas Piggin /* If FWNMI is inactive, addr will be -1, which will deliver to 0x100 */ 33460e236d34SNicholas Piggin if (spapr->fwnmi_system_reset_addr != -1) { 33470e236d34SNicholas Piggin uint64_t rtas_addr, addr; 33480e236d34SNicholas Piggin 33490e236d34SNicholas Piggin /* get rtas addr from fdt */ 33500e236d34SNicholas Piggin rtas_addr = spapr_get_rtas_addr(); 33510e236d34SNicholas Piggin if (!rtas_addr) { 33520e236d34SNicholas Piggin qemu_system_guest_panicked(NULL); 33530e236d34SNicholas Piggin return; 33540e236d34SNicholas Piggin } 33550e236d34SNicholas Piggin 33560e236d34SNicholas Piggin addr = rtas_addr + RTAS_ERROR_LOG_MAX + cs->cpu_index * sizeof(uint64_t)*2; 33570e236d34SNicholas Piggin stq_be_phys(&address_space_memory, addr, env->gpr[3]); 33580e236d34SNicholas Piggin stq_be_phys(&address_space_memory, addr + sizeof(uint64_t), 0); 33590e236d34SNicholas Piggin env->gpr[3] = addr; 33600e236d34SNicholas Piggin } 3361b5b7f391SNicholas Piggin ppc_cpu_do_system_reset(cs); 3362b5b7f391SNicholas Piggin if (spapr->fwnmi_system_reset_addr != -1) { 3363b5b7f391SNicholas Piggin env->nip = spapr->fwnmi_system_reset_addr; 3364b5b7f391SNicholas Piggin } 336534316482SAlexey Kardashevskiy } 336634316482SAlexey Kardashevskiy 336734316482SAlexey Kardashevskiy static void spapr_nmi(NMIState *n, int cpu_index, Error **errp) 336834316482SAlexey Kardashevskiy { 336934316482SAlexey Kardashevskiy CPUState *cs; 337034316482SAlexey Kardashevskiy 337134316482SAlexey Kardashevskiy CPU_FOREACH(cs) { 33721c7ad77eSNicholas Piggin async_run_on_cpu(cs, spapr_do_system_reset_on_cpu, RUN_ON_CPU_NULL); 337334316482SAlexey Kardashevskiy } 337434316482SAlexey Kardashevskiy } 337534316482SAlexey Kardashevskiy 3376ce2918cbSDavid Gibson int spapr_lmb_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr, 337762d38c9bSGreg Kurz void *fdt, int *fdt_start_offset, Error **errp) 337862d38c9bSGreg Kurz { 337962d38c9bSGreg Kurz uint64_t addr; 338062d38c9bSGreg Kurz uint32_t node; 338162d38c9bSGreg Kurz 338262d38c9bSGreg Kurz addr = spapr_drc_index(drc) * SPAPR_MEMORY_BLOCK_SIZE; 338362d38c9bSGreg Kurz node = object_property_get_uint(OBJECT(drc->dev), PC_DIMM_NODE_PROP, 338462d38c9bSGreg Kurz &error_abort); 3385f1aa45ffSDaniel Henrique Barboza *fdt_start_offset = spapr_dt_memory_node(spapr, fdt, node, addr, 338662d38c9bSGreg Kurz SPAPR_MEMORY_BLOCK_SIZE); 338762d38c9bSGreg Kurz return 0; 338862d38c9bSGreg Kurz } 338962d38c9bSGreg Kurz 3390ea042c53SGreg Kurz static void spapr_add_lmbs(DeviceState *dev, uint64_t addr_start, uint64_t size, 3391ea042c53SGreg Kurz bool dedicated_hp_event_source) 3392c20d332aSBharata B Rao { 3393ce2918cbSDavid Gibson SpaprDrc *drc; 3394c20d332aSBharata B Rao uint32_t nr_lmbs = size/SPAPR_MEMORY_BLOCK_SIZE; 339562d38c9bSGreg Kurz int i; 339679b78a6bSMichael Roth uint64_t addr = addr_start; 339794fd9cbaSLaurent Vivier bool hotplugged = spapr_drc_hotplugged(dev); 3398c20d332aSBharata B Rao 3399c20d332aSBharata B Rao for (i = 0; i < nr_lmbs; i++) { 3400fbf55397SDavid Gibson drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, 3401c20d332aSBharata B Rao addr / SPAPR_MEMORY_BLOCK_SIZE); 3402c20d332aSBharata B Rao g_assert(drc); 3403c20d332aSBharata B Rao 3404ea042c53SGreg Kurz /* 3405ea042c53SGreg Kurz * memory_device_get_free_addr() provided a range of free addresses 3406ea042c53SGreg Kurz * that doesn't overlap with any existing mapping at pre-plug. The 3407ea042c53SGreg Kurz * corresponding LMB DRCs are thus assumed to be all attachable. 3408ea042c53SGreg Kurz */ 3409bc370a65SGreg Kurz spapr_drc_attach(drc, dev); 341094fd9cbaSLaurent Vivier if (!hotplugged) { 341194fd9cbaSLaurent Vivier spapr_drc_reset(drc); 341294fd9cbaSLaurent Vivier } 3413c20d332aSBharata B Rao addr += SPAPR_MEMORY_BLOCK_SIZE; 3414c20d332aSBharata B Rao } 34155dd5238cSJianjun Duan /* send hotplug notification to the 34165dd5238cSJianjun Duan * guest only in case of hotplugged memory 34175dd5238cSJianjun Duan */ 341894fd9cbaSLaurent Vivier if (hotplugged) { 341979b78a6bSMichael Roth if (dedicated_hp_event_source) { 3420fbf55397SDavid Gibson drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, 342179b78a6bSMichael Roth addr_start / SPAPR_MEMORY_BLOCK_SIZE); 342273231f7cSGreg Kurz g_assert(drc); 342379b78a6bSMichael Roth spapr_hotplug_req_add_by_count_indexed(SPAPR_DR_CONNECTOR_TYPE_LMB, 342479b78a6bSMichael Roth nr_lmbs, 34250b55aa91SDavid Gibson spapr_drc_index(drc)); 342679b78a6bSMichael Roth } else { 342779b78a6bSMichael Roth spapr_hotplug_req_add_by_count(SPAPR_DR_CONNECTOR_TYPE_LMB, 342879b78a6bSMichael Roth nr_lmbs); 342979b78a6bSMichael Roth } 3430c20d332aSBharata B Rao } 34315dd5238cSJianjun Duan } 3432c20d332aSBharata B Rao 3433ea042c53SGreg Kurz static void spapr_memory_plug(HotplugHandler *hotplug_dev, DeviceState *dev) 3434c20d332aSBharata B Rao { 3435ce2918cbSDavid Gibson SpaprMachineState *ms = SPAPR_MACHINE(hotplug_dev); 3436c20d332aSBharata B Rao PCDIMMDevice *dimm = PC_DIMM(dev); 3437581778ddSGreg Kurz uint64_t size, addr; 3438581778ddSGreg Kurz int64_t slot; 3439ee3a71e3SShivaprasad G Bhat bool is_nvdimm = object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM); 344004790978SThomas Huth 3441946d6154SDavid Hildenbrand size = memory_device_get_region_size(MEMORY_DEVICE(dev), &error_abort); 3442df587133SThomas Huth 344384fd5496SGreg Kurz pc_dimm_plug(dimm, MACHINE(ms)); 3444c20d332aSBharata B Rao 3445ee3a71e3SShivaprasad G Bhat if (!is_nvdimm) { 34469ed442b8SMarc-André Lureau addr = object_property_get_uint(OBJECT(dimm), 3447271ced1dSGreg Kurz PC_DIMM_ADDR_PROP, &error_abort); 3448ea042c53SGreg Kurz spapr_add_lmbs(dev, addr, size, 3449ea042c53SGreg Kurz spapr_ovec_test(ms->ov5_cas, OV5_HP_EVT)); 3450ee3a71e3SShivaprasad G Bhat } else { 3451581778ddSGreg Kurz slot = object_property_get_int(OBJECT(dimm), 3452271ced1dSGreg Kurz PC_DIMM_SLOT_PROP, &error_abort); 3453581778ddSGreg Kurz /* We should have valid slot number at this point */ 3454581778ddSGreg Kurz g_assert(slot >= 0); 3455ea042c53SGreg Kurz spapr_add_nvdimm(dev, slot); 3456160bb678SGreg Kurz } 34576e837f98SGreg Kurz } 3458c20d332aSBharata B Rao 3459c871bc70SLaurent Vivier static void spapr_memory_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev, 3460c871bc70SLaurent Vivier Error **errp) 3461c871bc70SLaurent Vivier { 3462ce2918cbSDavid Gibson const SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(hotplug_dev); 3463ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_dev); 3464ee3a71e3SShivaprasad G Bhat bool is_nvdimm = object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM); 3465c871bc70SLaurent Vivier PCDIMMDevice *dimm = PC_DIMM(dev); 34668f1ffe5bSDavid Hildenbrand Error *local_err = NULL; 346704790978SThomas Huth uint64_t size; 3468123eec65SDavid Gibson Object *memdev; 3469123eec65SDavid Gibson hwaddr pagesize; 3470c871bc70SLaurent Vivier 34714e8a01bdSDavid Hildenbrand if (!smc->dr_lmb_enabled) { 34724e8a01bdSDavid Hildenbrand error_setg(errp, "Memory hotplug not supported for this machine"); 34734e8a01bdSDavid Hildenbrand return; 34744e8a01bdSDavid Hildenbrand } 34754e8a01bdSDavid Hildenbrand 3476946d6154SDavid Hildenbrand size = memory_device_get_region_size(MEMORY_DEVICE(dimm), &local_err); 3477946d6154SDavid Hildenbrand if (local_err) { 3478946d6154SDavid Hildenbrand error_propagate(errp, local_err); 347904790978SThomas Huth return; 348004790978SThomas Huth } 348104790978SThomas Huth 3482beb6073fSDaniel Henrique Barboza if (is_nvdimm) { 3483451c6905SGreg Kurz if (!spapr_nvdimm_validate(hotplug_dev, NVDIMM(dev), size, errp)) { 3484ee3a71e3SShivaprasad G Bhat return; 3485ee3a71e3SShivaprasad G Bhat } 3486beb6073fSDaniel Henrique Barboza } else if (size % SPAPR_MEMORY_BLOCK_SIZE) { 3487beb6073fSDaniel Henrique Barboza error_setg(errp, "Hotplugged memory size must be a multiple of " 3488beb6073fSDaniel Henrique Barboza "%" PRIu64 " MB", SPAPR_MEMORY_BLOCK_SIZE / MiB); 3489beb6073fSDaniel Henrique Barboza return; 3490c871bc70SLaurent Vivier } 3491c871bc70SLaurent Vivier 3492123eec65SDavid Gibson memdev = object_property_get_link(OBJECT(dimm), PC_DIMM_MEMDEV_PROP, 3493123eec65SDavid Gibson &error_abort); 3494123eec65SDavid Gibson pagesize = host_memory_backend_pagesize(MEMORY_BACKEND(memdev)); 349535dce34fSGreg Kurz if (!spapr_check_pagesize(spapr, pagesize, errp)) { 34968f1ffe5bSDavid Hildenbrand return; 34978f1ffe5bSDavid Hildenbrand } 34988f1ffe5bSDavid Hildenbrand 3499fd3416f5SDavid Hildenbrand pc_dimm_pre_plug(dimm, MACHINE(hotplug_dev), NULL, errp); 3500c871bc70SLaurent Vivier } 3501c871bc70SLaurent Vivier 3502ce2918cbSDavid Gibson struct SpaprDimmState { 35030cffce56SDavid Gibson PCDIMMDevice *dimm; 3504cf632463SBharata B Rao uint32_t nr_lmbs; 3505ce2918cbSDavid Gibson QTAILQ_ENTRY(SpaprDimmState) next; 35060cffce56SDavid Gibson }; 35070cffce56SDavid Gibson 3508ce2918cbSDavid Gibson static SpaprDimmState *spapr_pending_dimm_unplugs_find(SpaprMachineState *s, 35090cffce56SDavid Gibson PCDIMMDevice *dimm) 35100cffce56SDavid Gibson { 3511ce2918cbSDavid Gibson SpaprDimmState *dimm_state = NULL; 35120cffce56SDavid Gibson 35130cffce56SDavid Gibson QTAILQ_FOREACH(dimm_state, &s->pending_dimm_unplugs, next) { 35140cffce56SDavid Gibson if (dimm_state->dimm == dimm) { 35150cffce56SDavid Gibson break; 35160cffce56SDavid Gibson } 35170cffce56SDavid Gibson } 35180cffce56SDavid Gibson return dimm_state; 35190cffce56SDavid Gibson } 35200cffce56SDavid Gibson 3521ce2918cbSDavid Gibson static SpaprDimmState *spapr_pending_dimm_unplugs_add(SpaprMachineState *spapr, 35228d5981c4SBharata B Rao uint32_t nr_lmbs, 35238d5981c4SBharata B Rao PCDIMMDevice *dimm) 35240cffce56SDavid Gibson { 3525ce2918cbSDavid Gibson SpaprDimmState *ds = NULL; 35268d5981c4SBharata B Rao 35278d5981c4SBharata B Rao /* 35288d5981c4SBharata B Rao * If this request is for a DIMM whose removal had failed earlier 35298d5981c4SBharata B Rao * (due to guest's refusal to remove the LMBs), we would have this 35308d5981c4SBharata B Rao * dimm already in the pending_dimm_unplugs list. In that 35318d5981c4SBharata B Rao * case don't add again. 35328d5981c4SBharata B Rao */ 35338d5981c4SBharata B Rao ds = spapr_pending_dimm_unplugs_find(spapr, dimm); 35348d5981c4SBharata B Rao if (!ds) { 3535ce2918cbSDavid Gibson ds = g_malloc0(sizeof(SpaprDimmState)); 35368d5981c4SBharata B Rao ds->nr_lmbs = nr_lmbs; 35378d5981c4SBharata B Rao ds->dimm = dimm; 35388d5981c4SBharata B Rao QTAILQ_INSERT_HEAD(&spapr->pending_dimm_unplugs, ds, next); 35398d5981c4SBharata B Rao } 35408d5981c4SBharata B Rao return ds; 35410cffce56SDavid Gibson } 35420cffce56SDavid Gibson 3543ce2918cbSDavid Gibson static void spapr_pending_dimm_unplugs_remove(SpaprMachineState *spapr, 3544ce2918cbSDavid Gibson SpaprDimmState *dimm_state) 35450cffce56SDavid Gibson { 35460cffce56SDavid Gibson QTAILQ_REMOVE(&spapr->pending_dimm_unplugs, dimm_state, next); 35470cffce56SDavid Gibson g_free(dimm_state); 35480cffce56SDavid Gibson } 3549cf632463SBharata B Rao 3550ce2918cbSDavid Gibson static SpaprDimmState *spapr_recover_pending_dimm_state(SpaprMachineState *ms, 355116ee9980SDaniel Henrique Barboza PCDIMMDevice *dimm) 355216ee9980SDaniel Henrique Barboza { 3553ce2918cbSDavid Gibson SpaprDrc *drc; 3554946d6154SDavid Hildenbrand uint64_t size = memory_device_get_region_size(MEMORY_DEVICE(dimm), 3555946d6154SDavid Hildenbrand &error_abort); 355616ee9980SDaniel Henrique Barboza uint32_t nr_lmbs = size / SPAPR_MEMORY_BLOCK_SIZE; 355716ee9980SDaniel Henrique Barboza uint32_t avail_lmbs = 0; 355816ee9980SDaniel Henrique Barboza uint64_t addr_start, addr; 355916ee9980SDaniel Henrique Barboza int i; 356016ee9980SDaniel Henrique Barboza 356165226afdSGreg Kurz addr_start = object_property_get_uint(OBJECT(dimm), PC_DIMM_ADDR_PROP, 356216ee9980SDaniel Henrique Barboza &error_abort); 356316ee9980SDaniel Henrique Barboza 356416ee9980SDaniel Henrique Barboza addr = addr_start; 356516ee9980SDaniel Henrique Barboza for (i = 0; i < nr_lmbs; i++) { 3566fbf55397SDavid Gibson drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, 356716ee9980SDaniel Henrique Barboza addr / SPAPR_MEMORY_BLOCK_SIZE); 356816ee9980SDaniel Henrique Barboza g_assert(drc); 3569454b580aSDavid Gibson if (drc->dev) { 357016ee9980SDaniel Henrique Barboza avail_lmbs++; 357116ee9980SDaniel Henrique Barboza } 357216ee9980SDaniel Henrique Barboza addr += SPAPR_MEMORY_BLOCK_SIZE; 357316ee9980SDaniel Henrique Barboza } 357416ee9980SDaniel Henrique Barboza 35758d5981c4SBharata B Rao return spapr_pending_dimm_unplugs_add(ms, avail_lmbs, dimm); 357616ee9980SDaniel Henrique Barboza } 357716ee9980SDaniel Henrique Barboza 357831834723SDaniel Henrique Barboza /* Callback to be called during DRC release. */ 357931834723SDaniel Henrique Barboza void spapr_lmb_release(DeviceState *dev) 3580cf632463SBharata B Rao { 35813ec71474SDavid Hildenbrand HotplugHandler *hotplug_ctrl = qdev_get_hotplug_handler(dev); 3582ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_ctrl); 3583ce2918cbSDavid Gibson SpaprDimmState *ds = spapr_pending_dimm_unplugs_find(spapr, PC_DIMM(dev)); 3584cf632463SBharata B Rao 358516ee9980SDaniel Henrique Barboza /* This information will get lost if a migration occurs 358616ee9980SDaniel Henrique Barboza * during the unplug process. In this case recover it. */ 358716ee9980SDaniel Henrique Barboza if (ds == NULL) { 358816ee9980SDaniel Henrique Barboza ds = spapr_recover_pending_dimm_state(spapr, PC_DIMM(dev)); 35898d5981c4SBharata B Rao g_assert(ds); 3590454b580aSDavid Gibson /* The DRC being examined by the caller at least must be counted */ 3591454b580aSDavid Gibson g_assert(ds->nr_lmbs); 359216ee9980SDaniel Henrique Barboza } 3593454b580aSDavid Gibson 3594454b580aSDavid Gibson if (--ds->nr_lmbs) { 3595cf632463SBharata B Rao return; 3596cf632463SBharata B Rao } 3597cf632463SBharata B Rao 3598cf632463SBharata B Rao /* 3599cf632463SBharata B Rao * Now that all the LMBs have been removed by the guest, call the 36003ec71474SDavid Hildenbrand * unplug handler chain. This can never fail. 3601cf632463SBharata B Rao */ 36023ec71474SDavid Hildenbrand hotplug_handler_unplug(hotplug_ctrl, dev, &error_abort); 360307578b0aSDavid Hildenbrand object_unparent(OBJECT(dev)); 36043ec71474SDavid Hildenbrand } 36053ec71474SDavid Hildenbrand 36063ec71474SDavid Hildenbrand static void spapr_memory_unplug(HotplugHandler *hotplug_dev, DeviceState *dev) 36073ec71474SDavid Hildenbrand { 3608ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_dev); 3609ce2918cbSDavid Gibson SpaprDimmState *ds = spapr_pending_dimm_unplugs_find(spapr, PC_DIMM(dev)); 36103ec71474SDavid Hildenbrand 3611fd3416f5SDavid Hildenbrand pc_dimm_unplug(PC_DIMM(dev), MACHINE(hotplug_dev)); 3612981c3dcdSMarkus Armbruster qdev_unrealize(dev); 36132a129767SDaniel Henrique Barboza spapr_pending_dimm_unplugs_remove(spapr, ds); 3614cf632463SBharata B Rao } 3615cf632463SBharata B Rao 3616cf632463SBharata B Rao static void spapr_memory_unplug_request(HotplugHandler *hotplug_dev, 3617cf632463SBharata B Rao DeviceState *dev, Error **errp) 3618cf632463SBharata B Rao { 3619ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_dev); 3620cf632463SBharata B Rao PCDIMMDevice *dimm = PC_DIMM(dev); 362104790978SThomas Huth uint32_t nr_lmbs; 362204790978SThomas Huth uint64_t size, addr_start, addr; 36230cffce56SDavid Gibson int i; 3624ce2918cbSDavid Gibson SpaprDrc *drc; 362504790978SThomas Huth 3626ee3a71e3SShivaprasad G Bhat if (object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM)) { 3627dcfe4805SMarkus Armbruster error_setg(errp, "nvdimm device hot unplug is not supported yet."); 3628dcfe4805SMarkus Armbruster return; 3629ee3a71e3SShivaprasad G Bhat } 3630ee3a71e3SShivaprasad G Bhat 3631946d6154SDavid Hildenbrand size = memory_device_get_region_size(MEMORY_DEVICE(dimm), &error_abort); 363204790978SThomas Huth nr_lmbs = size / SPAPR_MEMORY_BLOCK_SIZE; 363304790978SThomas Huth 36349ed442b8SMarc-André Lureau addr_start = object_property_get_uint(OBJECT(dimm), PC_DIMM_ADDR_PROP, 3635271ced1dSGreg Kurz &error_abort); 3636cf632463SBharata B Rao 36372a129767SDaniel Henrique Barboza /* 36382a129767SDaniel Henrique Barboza * An existing pending dimm state for this DIMM means that there is an 36392a129767SDaniel Henrique Barboza * unplug operation in progress, waiting for the spapr_lmb_release 36402a129767SDaniel Henrique Barboza * callback to complete the job (BQL can't cover that far). In this case, 36412a129767SDaniel Henrique Barboza * bail out to avoid detaching DRCs that were already released. 36422a129767SDaniel Henrique Barboza */ 36432a129767SDaniel Henrique Barboza if (spapr_pending_dimm_unplugs_find(spapr, dimm)) { 3644dcfe4805SMarkus Armbruster error_setg(errp, "Memory unplug already in progress for device %s", 36452a129767SDaniel Henrique Barboza dev->id); 3646dcfe4805SMarkus Armbruster return; 36472a129767SDaniel Henrique Barboza } 36482a129767SDaniel Henrique Barboza 36498d5981c4SBharata B Rao spapr_pending_dimm_unplugs_add(spapr, nr_lmbs, dimm); 36500cffce56SDavid Gibson 36510cffce56SDavid Gibson addr = addr_start; 36520cffce56SDavid Gibson for (i = 0; i < nr_lmbs; i++) { 3653fbf55397SDavid Gibson drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, 36540cffce56SDavid Gibson addr / SPAPR_MEMORY_BLOCK_SIZE); 36550cffce56SDavid Gibson g_assert(drc); 36560cffce56SDavid Gibson 3657a8dc47fdSDavid Gibson spapr_drc_detach(drc); 36580cffce56SDavid Gibson addr += SPAPR_MEMORY_BLOCK_SIZE; 36590cffce56SDavid Gibson } 36600cffce56SDavid Gibson 3661fbf55397SDavid Gibson drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, 36620cffce56SDavid Gibson addr_start / SPAPR_MEMORY_BLOCK_SIZE); 366373231f7cSGreg Kurz g_assert(drc); 36640cffce56SDavid Gibson spapr_hotplug_req_remove_by_count_indexed(SPAPR_DR_CONNECTOR_TYPE_LMB, 36650b55aa91SDavid Gibson nr_lmbs, spapr_drc_index(drc)); 3666cf632463SBharata B Rao } 3667cf632463SBharata B Rao 3668765d1bddSDavid Gibson /* Callback to be called during DRC release. */ 3669765d1bddSDavid Gibson void spapr_core_release(DeviceState *dev) 3670ff9006ddSIgor Mammedov { 3671a4261be1SDavid Hildenbrand HotplugHandler *hotplug_ctrl = qdev_get_hotplug_handler(dev); 3672a4261be1SDavid Hildenbrand 3673a4261be1SDavid Hildenbrand /* Call the unplug handler chain. This can never fail. */ 3674a4261be1SDavid Hildenbrand hotplug_handler_unplug(hotplug_ctrl, dev, &error_abort); 367507578b0aSDavid Hildenbrand object_unparent(OBJECT(dev)); 3676a4261be1SDavid Hildenbrand } 3677a4261be1SDavid Hildenbrand 3678a4261be1SDavid Hildenbrand static void spapr_core_unplug(HotplugHandler *hotplug_dev, DeviceState *dev) 3679a4261be1SDavid Hildenbrand { 3680a4261be1SDavid Hildenbrand MachineState *ms = MACHINE(hotplug_dev); 3681ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(ms); 3682ff9006ddSIgor Mammedov CPUCore *cc = CPU_CORE(dev); 3683535455fdSIgor Mammedov CPUArchId *core_slot = spapr_find_cpu_slot(ms, cc->core_id, NULL); 3684ff9006ddSIgor Mammedov 368546f7afa3SGreg Kurz if (smc->pre_2_10_has_unused_icps) { 3686ce2918cbSDavid Gibson SpaprCpuCore *sc = SPAPR_CPU_CORE(OBJECT(dev)); 368746f7afa3SGreg Kurz int i; 368846f7afa3SGreg Kurz 368946f7afa3SGreg Kurz for (i = 0; i < cc->nr_threads; i++) { 369094ad93bdSGreg Kurz CPUState *cs = CPU(sc->threads[i]); 369146f7afa3SGreg Kurz 369246f7afa3SGreg Kurz pre_2_10_vmstate_register_dummy_icp(cs->cpu_index); 369346f7afa3SGreg Kurz } 369446f7afa3SGreg Kurz } 369546f7afa3SGreg Kurz 369607572c06SGreg Kurz assert(core_slot); 3697535455fdSIgor Mammedov core_slot->cpu = NULL; 3698981c3dcdSMarkus Armbruster qdev_unrealize(dev); 3699ff9006ddSIgor Mammedov } 3700ff9006ddSIgor Mammedov 3701115debf2SIgor Mammedov static 3702115debf2SIgor Mammedov void spapr_core_unplug_request(HotplugHandler *hotplug_dev, DeviceState *dev, 3703ff9006ddSIgor Mammedov Error **errp) 3704ff9006ddSIgor Mammedov { 3705ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev)); 3706535455fdSIgor Mammedov int index; 3707ce2918cbSDavid Gibson SpaprDrc *drc; 3708535455fdSIgor Mammedov CPUCore *cc = CPU_CORE(dev); 3709ff9006ddSIgor Mammedov 3710535455fdSIgor Mammedov if (!spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index)) { 3711535455fdSIgor Mammedov error_setg(errp, "Unable to find CPU core with core-id: %d", 3712535455fdSIgor Mammedov cc->core_id); 3713535455fdSIgor Mammedov return; 3714535455fdSIgor Mammedov } 3715ff9006ddSIgor Mammedov if (index == 0) { 3716ff9006ddSIgor Mammedov error_setg(errp, "Boot CPU core may not be unplugged"); 3717ff9006ddSIgor Mammedov return; 3718ff9006ddSIgor Mammedov } 3719ff9006ddSIgor Mammedov 37205d0fb150SGreg Kurz drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU, 37215d0fb150SGreg Kurz spapr_vcpu_id(spapr, cc->core_id)); 3722ff9006ddSIgor Mammedov g_assert(drc); 3723ff9006ddSIgor Mammedov 372447c8c915SGreg Kurz if (!spapr_drc_unplug_requested(drc)) { 3725a8dc47fdSDavid Gibson spapr_drc_detach(drc); 3726ff9006ddSIgor Mammedov spapr_hotplug_req_remove_by_index(drc); 3727ff9006ddSIgor Mammedov } 372847c8c915SGreg Kurz } 3729ff9006ddSIgor Mammedov 3730ce2918cbSDavid Gibson int spapr_core_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr, 3731345b12b9SGreg Kurz void *fdt, int *fdt_start_offset, Error **errp) 3732345b12b9SGreg Kurz { 3733ce2918cbSDavid Gibson SpaprCpuCore *core = SPAPR_CPU_CORE(drc->dev); 3734345b12b9SGreg Kurz CPUState *cs = CPU(core->threads[0]); 3735345b12b9SGreg Kurz PowerPCCPU *cpu = POWERPC_CPU(cs); 3736345b12b9SGreg Kurz DeviceClass *dc = DEVICE_GET_CLASS(cs); 3737345b12b9SGreg Kurz int id = spapr_get_vcpu_id(cpu); 37387265bc3eSDaniel Henrique Barboza g_autofree char *nodename = NULL; 3739345b12b9SGreg Kurz int offset; 3740345b12b9SGreg Kurz 3741345b12b9SGreg Kurz nodename = g_strdup_printf("%s@%x", dc->fw_name, id); 3742345b12b9SGreg Kurz offset = fdt_add_subnode(fdt, 0, nodename); 3743345b12b9SGreg Kurz 374491335a5eSDavid Gibson spapr_dt_cpu(cs, fdt, offset, spapr); 3745345b12b9SGreg Kurz 3746a85bb34eSDaniel Henrique Barboza /* 3747a85bb34eSDaniel Henrique Barboza * spapr_dt_cpu() does not fill the 'name' property in the 3748a85bb34eSDaniel Henrique Barboza * CPU node. The function is called during boot process, before 3749a85bb34eSDaniel Henrique Barboza * and after CAS, and overwriting the 'name' property written 3750a85bb34eSDaniel Henrique Barboza * by SLOF is not allowed. 3751a85bb34eSDaniel Henrique Barboza * 3752a85bb34eSDaniel Henrique Barboza * Write it manually after spapr_dt_cpu(). This makes the hotplug 3753a85bb34eSDaniel Henrique Barboza * CPUs more compatible with the coldplugged ones, which have 3754a85bb34eSDaniel Henrique Barboza * the 'name' property. Linux Kernel also relies on this 3755a85bb34eSDaniel Henrique Barboza * property to identify CPU nodes. 3756a85bb34eSDaniel Henrique Barboza */ 3757a85bb34eSDaniel Henrique Barboza _FDT((fdt_setprop_string(fdt, offset, "name", nodename))); 3758a85bb34eSDaniel Henrique Barboza 3759345b12b9SGreg Kurz *fdt_start_offset = offset; 3760345b12b9SGreg Kurz return 0; 3761345b12b9SGreg Kurz } 3762345b12b9SGreg Kurz 3763f9b43958SGreg Kurz static void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev) 3764ff9006ddSIgor Mammedov { 3765ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev)); 3766ff9006ddSIgor Mammedov MachineClass *mc = MACHINE_GET_CLASS(spapr); 3767ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 3768ce2918cbSDavid Gibson SpaprCpuCore *core = SPAPR_CPU_CORE(OBJECT(dev)); 3769ff9006ddSIgor Mammedov CPUCore *cc = CPU_CORE(dev); 3770345b12b9SGreg Kurz CPUState *cs; 3771ce2918cbSDavid Gibson SpaprDrc *drc; 3772535455fdSIgor Mammedov CPUArchId *core_slot; 3773535455fdSIgor Mammedov int index; 377494fd9cbaSLaurent Vivier bool hotplugged = spapr_drc_hotplugged(dev); 3775b1e81567SGreg Kurz int i; 3776ff9006ddSIgor Mammedov 3777535455fdSIgor Mammedov core_slot = spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index); 3778f9b43958SGreg Kurz g_assert(core_slot); /* Already checked in spapr_core_pre_plug() */ 3779f9b43958SGreg Kurz 37805d0fb150SGreg Kurz drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU, 37815d0fb150SGreg Kurz spapr_vcpu_id(spapr, cc->core_id)); 3782ff9006ddSIgor Mammedov 3783c5514d0eSIgor Mammedov g_assert(drc || !mc->has_hotpluggable_cpus); 3784ff9006ddSIgor Mammedov 3785e49c63d5SGreg Kurz if (drc) { 3786f9b43958SGreg Kurz /* 3787f9b43958SGreg Kurz * spapr_core_pre_plug() already buys us this is a brand new 3788f9b43958SGreg Kurz * core being plugged into a free slot. Nothing should already 3789f9b43958SGreg Kurz * be attached to the corresponding DRC. 3790f9b43958SGreg Kurz */ 3791bc370a65SGreg Kurz spapr_drc_attach(drc, dev); 3792ff9006ddSIgor Mammedov 379394fd9cbaSLaurent Vivier if (hotplugged) { 3794ff9006ddSIgor Mammedov /* 379594fd9cbaSLaurent Vivier * Send hotplug notification interrupt to the guest only 379694fd9cbaSLaurent Vivier * in case of hotplugged CPUs. 3797ff9006ddSIgor Mammedov */ 3798ff9006ddSIgor Mammedov spapr_hotplug_req_add_by_index(drc); 379994fd9cbaSLaurent Vivier } else { 380094fd9cbaSLaurent Vivier spapr_drc_reset(drc); 3801ff9006ddSIgor Mammedov } 380294fd9cbaSLaurent Vivier } 380394fd9cbaSLaurent Vivier 3804535455fdSIgor Mammedov core_slot->cpu = OBJECT(dev); 380546f7afa3SGreg Kurz 3806b1e81567SGreg Kurz /* 3807b1e81567SGreg Kurz * Set compatibility mode to match the boot CPU, which was either set 380837641213SGreg Kurz * by the machine reset code or by CAS. This really shouldn't fail at 380937641213SGreg Kurz * this point. 3810b1e81567SGreg Kurz */ 3811b1e81567SGreg Kurz if (hotplugged) { 3812b1e81567SGreg Kurz for (i = 0; i < cc->nr_threads; i++) { 381337641213SGreg Kurz ppc_set_compat(core->threads[i], POWERPC_CPU(first_cpu)->compat_pvr, 381437641213SGreg Kurz &error_abort); 3815b1e81567SGreg Kurz } 3816b1e81567SGreg Kurz } 38171b4ab514SGreg Kurz 38181b4ab514SGreg Kurz if (smc->pre_2_10_has_unused_icps) { 38191b4ab514SGreg Kurz for (i = 0; i < cc->nr_threads; i++) { 38201b4ab514SGreg Kurz cs = CPU(core->threads[i]); 38211b4ab514SGreg Kurz pre_2_10_vmstate_unregister_dummy_icp(cs->cpu_index); 38221b4ab514SGreg Kurz } 38231b4ab514SGreg Kurz } 3824ff9006ddSIgor Mammedov } 3825ff9006ddSIgor Mammedov 3826ff9006ddSIgor Mammedov static void spapr_core_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev, 3827ff9006ddSIgor Mammedov Error **errp) 3828ff9006ddSIgor Mammedov { 3829ff9006ddSIgor Mammedov MachineState *machine = MACHINE(OBJECT(hotplug_dev)); 3830ff9006ddSIgor Mammedov MachineClass *mc = MACHINE_GET_CLASS(hotplug_dev); 3831ff9006ddSIgor Mammedov CPUCore *cc = CPU_CORE(dev); 38322e9c10ebSIgor Mammedov const char *base_core_type = spapr_get_cpu_core_type(machine->cpu_type); 3833ff9006ddSIgor Mammedov const char *type = object_get_typename(OBJECT(dev)); 3834535455fdSIgor Mammedov CPUArchId *core_slot; 3835535455fdSIgor Mammedov int index; 3836fe6b6346SLike Xu unsigned int smp_threads = machine->smp.threads; 3837ff9006ddSIgor Mammedov 3838c5514d0eSIgor Mammedov if (dev->hotplugged && !mc->has_hotpluggable_cpus) { 3839dcfe4805SMarkus Armbruster error_setg(errp, "CPU hotplug not supported for this machine"); 3840dcfe4805SMarkus Armbruster return; 3841ff9006ddSIgor Mammedov } 3842ff9006ddSIgor Mammedov 3843ff9006ddSIgor Mammedov if (strcmp(base_core_type, type)) { 3844dcfe4805SMarkus Armbruster error_setg(errp, "CPU core type should be %s", base_core_type); 3845dcfe4805SMarkus Armbruster return; 3846ff9006ddSIgor Mammedov } 3847ff9006ddSIgor Mammedov 3848ff9006ddSIgor Mammedov if (cc->core_id % smp_threads) { 3849dcfe4805SMarkus Armbruster error_setg(errp, "invalid core id %d", cc->core_id); 3850dcfe4805SMarkus Armbruster return; 3851ff9006ddSIgor Mammedov } 3852ff9006ddSIgor Mammedov 3853459264efSDavid Gibson /* 3854459264efSDavid Gibson * In general we should have homogeneous threads-per-core, but old 3855459264efSDavid Gibson * (pre hotplug support) machine types allow the last core to have 3856459264efSDavid Gibson * reduced threads as a compatibility hack for when we allowed 3857459264efSDavid Gibson * total vcpus not a multiple of threads-per-core. 3858459264efSDavid Gibson */ 3859459264efSDavid Gibson if (mc->has_hotpluggable_cpus && (cc->nr_threads != smp_threads)) { 3860dcfe4805SMarkus Armbruster error_setg(errp, "invalid nr-threads %d, must be %d", cc->nr_threads, 3861dcfe4805SMarkus Armbruster smp_threads); 3862dcfe4805SMarkus Armbruster return; 38638149e299SDavid Gibson } 38648149e299SDavid Gibson 3865535455fdSIgor Mammedov core_slot = spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index); 3866535455fdSIgor Mammedov if (!core_slot) { 3867dcfe4805SMarkus Armbruster error_setg(errp, "core id %d out of range", cc->core_id); 3868dcfe4805SMarkus Armbruster return; 3869ff9006ddSIgor Mammedov } 3870ff9006ddSIgor Mammedov 3871535455fdSIgor Mammedov if (core_slot->cpu) { 3872dcfe4805SMarkus Armbruster error_setg(errp, "core %d already populated", cc->core_id); 3873dcfe4805SMarkus Armbruster return; 3874ff9006ddSIgor Mammedov } 3875ff9006ddSIgor Mammedov 3876dcfe4805SMarkus Armbruster numa_cpu_pre_plug(core_slot, dev, errp); 3877ff9006ddSIgor Mammedov } 3878ff9006ddSIgor Mammedov 3879ce2918cbSDavid Gibson int spapr_phb_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr, 3880bb2bdd81SGreg Kurz void *fdt, int *fdt_start_offset, Error **errp) 3881bb2bdd81SGreg Kurz { 3882ce2918cbSDavid Gibson SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(drc->dev); 3883bb2bdd81SGreg Kurz int intc_phandle; 3884bb2bdd81SGreg Kurz 3885bb2bdd81SGreg Kurz intc_phandle = spapr_irq_get_phandle(spapr, spapr->fdt_blob, errp); 3886bb2bdd81SGreg Kurz if (intc_phandle <= 0) { 3887bb2bdd81SGreg Kurz return -1; 3888bb2bdd81SGreg Kurz } 3889bb2bdd81SGreg Kurz 38908cbe71ecSDavid Gibson if (spapr_dt_phb(spapr, sphb, intc_phandle, fdt, fdt_start_offset)) { 3891bb2bdd81SGreg Kurz error_setg(errp, "unable to create FDT node for PHB %d", sphb->index); 3892bb2bdd81SGreg Kurz return -1; 3893bb2bdd81SGreg Kurz } 3894bb2bdd81SGreg Kurz 3895bb2bdd81SGreg Kurz /* generally SLOF creates these, for hotplug it's up to QEMU */ 3896bb2bdd81SGreg Kurz _FDT(fdt_setprop_string(fdt, *fdt_start_offset, "name", "pci")); 3897bb2bdd81SGreg Kurz 3898bb2bdd81SGreg Kurz return 0; 3899bb2bdd81SGreg Kurz } 3900bb2bdd81SGreg Kurz 3901f5598c92SGreg Kurz static bool spapr_phb_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev, 3902bb2bdd81SGreg Kurz Error **errp) 3903bb2bdd81SGreg Kurz { 3904ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev)); 3905ce2918cbSDavid Gibson SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(dev); 3906ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); 3907bb2bdd81SGreg Kurz const unsigned windows_supported = spapr_phb_windows_supported(sphb); 39089a070699SGreg Kurz SpaprDrc *drc; 3909bb2bdd81SGreg Kurz 3910bb2bdd81SGreg Kurz if (dev->hotplugged && !smc->dr_phb_enabled) { 3911bb2bdd81SGreg Kurz error_setg(errp, "PHB hotplug not supported for this machine"); 3912f5598c92SGreg Kurz return false; 3913bb2bdd81SGreg Kurz } 3914bb2bdd81SGreg Kurz 3915bb2bdd81SGreg Kurz if (sphb->index == (uint32_t)-1) { 3916bb2bdd81SGreg Kurz error_setg(errp, "\"index\" for PAPR PHB is mandatory"); 3917f5598c92SGreg Kurz return false; 3918bb2bdd81SGreg Kurz } 3919bb2bdd81SGreg Kurz 39209a070699SGreg Kurz drc = spapr_drc_by_id(TYPE_SPAPR_DRC_PHB, sphb->index); 39219a070699SGreg Kurz if (drc && drc->dev) { 39229a070699SGreg Kurz error_setg(errp, "PHB %d already attached", sphb->index); 39239a070699SGreg Kurz return false; 39249a070699SGreg Kurz } 39259a070699SGreg Kurz 3926bb2bdd81SGreg Kurz /* 3927bb2bdd81SGreg Kurz * This will check that sphb->index doesn't exceed the maximum number of 3928bb2bdd81SGreg Kurz * PHBs for the current machine type. 3929bb2bdd81SGreg Kurz */ 3930f5598c92SGreg Kurz return 3931bb2bdd81SGreg Kurz smc->phb_placement(spapr, sphb->index, 3932bb2bdd81SGreg Kurz &sphb->buid, &sphb->io_win_addr, 3933bb2bdd81SGreg Kurz &sphb->mem_win_addr, &sphb->mem64_win_addr, 3934ec132efaSAlexey Kardashevskiy windows_supported, sphb->dma_liobn, 3935ec132efaSAlexey Kardashevskiy &sphb->nv2_gpa_win_addr, &sphb->nv2_atsd_win_addr, 3936ec132efaSAlexey Kardashevskiy errp); 3937bb2bdd81SGreg Kurz } 3938bb2bdd81SGreg Kurz 39399a070699SGreg Kurz static void spapr_phb_plug(HotplugHandler *hotplug_dev, DeviceState *dev) 3940bb2bdd81SGreg Kurz { 3941ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev)); 3942ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); 3943ce2918cbSDavid Gibson SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(dev); 3944ce2918cbSDavid Gibson SpaprDrc *drc; 3945bb2bdd81SGreg Kurz bool hotplugged = spapr_drc_hotplugged(dev); 3946bb2bdd81SGreg Kurz 3947bb2bdd81SGreg Kurz if (!smc->dr_phb_enabled) { 3948bb2bdd81SGreg Kurz return; 3949bb2bdd81SGreg Kurz } 3950bb2bdd81SGreg Kurz 3951bb2bdd81SGreg Kurz drc = spapr_drc_by_id(TYPE_SPAPR_DRC_PHB, sphb->index); 3952bb2bdd81SGreg Kurz /* hotplug hooks should check it's enabled before getting this far */ 3953bb2bdd81SGreg Kurz assert(drc); 3954bb2bdd81SGreg Kurz 39559a070699SGreg Kurz /* spapr_phb_pre_plug() already checked the DRC is attachable */ 3956bc370a65SGreg Kurz spapr_drc_attach(drc, dev); 3957bb2bdd81SGreg Kurz 3958bb2bdd81SGreg Kurz if (hotplugged) { 3959bb2bdd81SGreg Kurz spapr_hotplug_req_add_by_index(drc); 3960bb2bdd81SGreg Kurz } else { 3961bb2bdd81SGreg Kurz spapr_drc_reset(drc); 3962bb2bdd81SGreg Kurz } 3963bb2bdd81SGreg Kurz } 3964bb2bdd81SGreg Kurz 3965bb2bdd81SGreg Kurz void spapr_phb_release(DeviceState *dev) 3966bb2bdd81SGreg Kurz { 3967bb2bdd81SGreg Kurz HotplugHandler *hotplug_ctrl = qdev_get_hotplug_handler(dev); 3968bb2bdd81SGreg Kurz 3969bb2bdd81SGreg Kurz hotplug_handler_unplug(hotplug_ctrl, dev, &error_abort); 397007578b0aSDavid Hildenbrand object_unparent(OBJECT(dev)); 3971bb2bdd81SGreg Kurz } 3972bb2bdd81SGreg Kurz 3973bb2bdd81SGreg Kurz static void spapr_phb_unplug(HotplugHandler *hotplug_dev, DeviceState *dev) 3974bb2bdd81SGreg Kurz { 3975981c3dcdSMarkus Armbruster qdev_unrealize(dev); 3976bb2bdd81SGreg Kurz } 3977bb2bdd81SGreg Kurz 3978bb2bdd81SGreg Kurz static void spapr_phb_unplug_request(HotplugHandler *hotplug_dev, 3979bb2bdd81SGreg Kurz DeviceState *dev, Error **errp) 3980bb2bdd81SGreg Kurz { 3981ce2918cbSDavid Gibson SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(dev); 3982ce2918cbSDavid Gibson SpaprDrc *drc; 3983bb2bdd81SGreg Kurz 3984bb2bdd81SGreg Kurz drc = spapr_drc_by_id(TYPE_SPAPR_DRC_PHB, sphb->index); 3985bb2bdd81SGreg Kurz assert(drc); 3986bb2bdd81SGreg Kurz 3987bb2bdd81SGreg Kurz if (!spapr_drc_unplug_requested(drc)) { 3988bb2bdd81SGreg Kurz spapr_drc_detach(drc); 3989bb2bdd81SGreg Kurz spapr_hotplug_req_remove_by_index(drc); 3990bb2bdd81SGreg Kurz } 3991bb2bdd81SGreg Kurz } 3992bb2bdd81SGreg Kurz 3993ac96807bSGreg Kurz static 3994ac96807bSGreg Kurz bool spapr_tpm_proxy_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev, 39950fb6bd07SMichael Roth Error **errp) 39960fb6bd07SMichael Roth { 39970fb6bd07SMichael Roth SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev)); 3998ac96807bSGreg Kurz 3999ac96807bSGreg Kurz if (spapr->tpm_proxy != NULL) { 4000ac96807bSGreg Kurz error_setg(errp, "Only one TPM proxy can be specified for this machine"); 4001ac96807bSGreg Kurz return false; 4002ac96807bSGreg Kurz } 4003ac96807bSGreg Kurz 4004ac96807bSGreg Kurz return true; 4005ac96807bSGreg Kurz } 4006ac96807bSGreg Kurz 4007ac96807bSGreg Kurz static void spapr_tpm_proxy_plug(HotplugHandler *hotplug_dev, DeviceState *dev) 4008ac96807bSGreg Kurz { 4009ac96807bSGreg Kurz SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev)); 40100fb6bd07SMichael Roth SpaprTpmProxy *tpm_proxy = SPAPR_TPM_PROXY(dev); 40110fb6bd07SMichael Roth 4012ac96807bSGreg Kurz /* Already checked in spapr_tpm_proxy_pre_plug() */ 4013ac96807bSGreg Kurz g_assert(spapr->tpm_proxy == NULL); 40140fb6bd07SMichael Roth 40150fb6bd07SMichael Roth spapr->tpm_proxy = tpm_proxy; 40160fb6bd07SMichael Roth } 40170fb6bd07SMichael Roth 40180fb6bd07SMichael Roth static void spapr_tpm_proxy_unplug(HotplugHandler *hotplug_dev, DeviceState *dev) 40190fb6bd07SMichael Roth { 40200fb6bd07SMichael Roth SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev)); 40210fb6bd07SMichael Roth 4022981c3dcdSMarkus Armbruster qdev_unrealize(dev); 40230fb6bd07SMichael Roth object_unparent(OBJECT(dev)); 40240fb6bd07SMichael Roth spapr->tpm_proxy = NULL; 40250fb6bd07SMichael Roth } 40260fb6bd07SMichael Roth 4027c20d332aSBharata B Rao static void spapr_machine_device_plug(HotplugHandler *hotplug_dev, 4028c20d332aSBharata B Rao DeviceState *dev, Error **errp) 4029c20d332aSBharata B Rao { 4030c20d332aSBharata B Rao if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { 4031ea042c53SGreg Kurz spapr_memory_plug(hotplug_dev, dev); 4032af81cf32SBharata B Rao } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) { 4033f9b43958SGreg Kurz spapr_core_plug(hotplug_dev, dev); 4034bb2bdd81SGreg Kurz } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) { 40359a070699SGreg Kurz spapr_phb_plug(hotplug_dev, dev); 40360fb6bd07SMichael Roth } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) { 4037ac96807bSGreg Kurz spapr_tpm_proxy_plug(hotplug_dev, dev); 4038c20d332aSBharata B Rao } 4039c20d332aSBharata B Rao } 4040c20d332aSBharata B Rao 404188432f44SDavid Hildenbrand static void spapr_machine_device_unplug(HotplugHandler *hotplug_dev, 404288432f44SDavid Hildenbrand DeviceState *dev, Error **errp) 404388432f44SDavid Hildenbrand { 40443ec71474SDavid Hildenbrand if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { 40453ec71474SDavid Hildenbrand spapr_memory_unplug(hotplug_dev, dev); 4046a4261be1SDavid Hildenbrand } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) { 4047a4261be1SDavid Hildenbrand spapr_core_unplug(hotplug_dev, dev); 4048bb2bdd81SGreg Kurz } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) { 4049bb2bdd81SGreg Kurz spapr_phb_unplug(hotplug_dev, dev); 40500fb6bd07SMichael Roth } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) { 40510fb6bd07SMichael Roth spapr_tpm_proxy_unplug(hotplug_dev, dev); 40523ec71474SDavid Hildenbrand } 405388432f44SDavid Hildenbrand } 405488432f44SDavid Hildenbrand 405573598c75SGreg Kurz bool spapr_memory_hot_unplug_supported(SpaprMachineState *spapr) 405673598c75SGreg Kurz { 405773598c75SGreg Kurz return spapr_ovec_test(spapr->ov5_cas, OV5_HP_EVT) || 405873598c75SGreg Kurz /* 405973598c75SGreg Kurz * CAS will process all pending unplug requests. 406073598c75SGreg Kurz * 406173598c75SGreg Kurz * HACK: a guest could theoretically have cleared all bits in OV5, 406273598c75SGreg Kurz * but none of the guests we care for do. 406373598c75SGreg Kurz */ 406473598c75SGreg Kurz spapr_ovec_empty(spapr->ov5_cas); 406573598c75SGreg Kurz } 406673598c75SGreg Kurz 4067cf632463SBharata B Rao static void spapr_machine_device_unplug_request(HotplugHandler *hotplug_dev, 4068cf632463SBharata B Rao DeviceState *dev, Error **errp) 4069cf632463SBharata B Rao { 4070ce2918cbSDavid Gibson SpaprMachineState *sms = SPAPR_MACHINE(OBJECT(hotplug_dev)); 4071c86c1affSDaniel Henrique Barboza MachineClass *mc = MACHINE_GET_CLASS(sms); 4072ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 4073cf632463SBharata B Rao 4074cf632463SBharata B Rao if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { 407573598c75SGreg Kurz if (spapr_memory_hot_unplug_supported(sms)) { 4076cf632463SBharata B Rao spapr_memory_unplug_request(hotplug_dev, dev, errp); 4077cf632463SBharata B Rao } else { 4078cf632463SBharata B Rao error_setg(errp, "Memory hot unplug not supported for this guest"); 4079cf632463SBharata B Rao } 40806f4b5c3eSBharata B Rao } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) { 4081c5514d0eSIgor Mammedov if (!mc->has_hotpluggable_cpus) { 40826f4b5c3eSBharata B Rao error_setg(errp, "CPU hot unplug not supported on this machine"); 40836f4b5c3eSBharata B Rao return; 40846f4b5c3eSBharata B Rao } 4085115debf2SIgor Mammedov spapr_core_unplug_request(hotplug_dev, dev, errp); 4086bb2bdd81SGreg Kurz } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) { 4087bb2bdd81SGreg Kurz if (!smc->dr_phb_enabled) { 4088bb2bdd81SGreg Kurz error_setg(errp, "PHB hot unplug not supported on this machine"); 4089bb2bdd81SGreg Kurz return; 4090bb2bdd81SGreg Kurz } 4091bb2bdd81SGreg Kurz spapr_phb_unplug_request(hotplug_dev, dev, errp); 40920fb6bd07SMichael Roth } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) { 40930fb6bd07SMichael Roth spapr_tpm_proxy_unplug(hotplug_dev, dev); 4094c20d332aSBharata B Rao } 4095c20d332aSBharata B Rao } 4096c20d332aSBharata B Rao 409794a94e4cSBharata B Rao static void spapr_machine_device_pre_plug(HotplugHandler *hotplug_dev, 409894a94e4cSBharata B Rao DeviceState *dev, Error **errp) 409994a94e4cSBharata B Rao { 4100c871bc70SLaurent Vivier if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { 4101c871bc70SLaurent Vivier spapr_memory_pre_plug(hotplug_dev, dev, errp); 4102c871bc70SLaurent Vivier } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) { 410394a94e4cSBharata B Rao spapr_core_pre_plug(hotplug_dev, dev, errp); 4104bb2bdd81SGreg Kurz } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) { 4105bb2bdd81SGreg Kurz spapr_phb_pre_plug(hotplug_dev, dev, errp); 4106ac96807bSGreg Kurz } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) { 4107ac96807bSGreg Kurz spapr_tpm_proxy_pre_plug(hotplug_dev, dev, errp); 410894a94e4cSBharata B Rao } 410994a94e4cSBharata B Rao } 411094a94e4cSBharata B Rao 41117ebaf795SBharata B Rao static HotplugHandler *spapr_get_hotplug_handler(MachineState *machine, 4112c20d332aSBharata B Rao DeviceState *dev) 4113c20d332aSBharata B Rao { 411494a94e4cSBharata B Rao if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM) || 4115bb2bdd81SGreg Kurz object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE) || 41160fb6bd07SMichael Roth object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE) || 41170fb6bd07SMichael Roth object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) { 4118c20d332aSBharata B Rao return HOTPLUG_HANDLER(machine); 4119c20d332aSBharata B Rao } 4120cb600087SDavid Gibson if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) { 4121cb600087SDavid Gibson PCIDevice *pcidev = PCI_DEVICE(dev); 4122cb600087SDavid Gibson PCIBus *root = pci_device_root_bus(pcidev); 4123cb600087SDavid Gibson SpaprPhbState *phb = 4124cb600087SDavid Gibson (SpaprPhbState *)object_dynamic_cast(OBJECT(BUS(root)->parent), 4125cb600087SDavid Gibson TYPE_SPAPR_PCI_HOST_BRIDGE); 4126cb600087SDavid Gibson 4127cb600087SDavid Gibson if (phb) { 4128cb600087SDavid Gibson return HOTPLUG_HANDLER(phb); 4129cb600087SDavid Gibson } 4130cb600087SDavid Gibson } 4131c20d332aSBharata B Rao return NULL; 4132c20d332aSBharata B Rao } 4133c20d332aSBharata B Rao 4134ea089eebSIgor Mammedov static CpuInstanceProperties 4135ea089eebSIgor Mammedov spapr_cpu_index_to_props(MachineState *machine, unsigned cpu_index) 413620bb648dSDavid Gibson { 4137ea089eebSIgor Mammedov CPUArchId *core_slot; 4138ea089eebSIgor Mammedov MachineClass *mc = MACHINE_GET_CLASS(machine); 4139ea089eebSIgor Mammedov 4140ea089eebSIgor Mammedov /* make sure possible_cpu are intialized */ 4141ea089eebSIgor Mammedov mc->possible_cpu_arch_ids(machine); 4142ea089eebSIgor Mammedov /* get CPU core slot containing thread that matches cpu_index */ 4143ea089eebSIgor Mammedov core_slot = spapr_find_cpu_slot(machine, cpu_index, NULL); 4144ea089eebSIgor Mammedov assert(core_slot); 4145ea089eebSIgor Mammedov return core_slot->props; 414620bb648dSDavid Gibson } 414720bb648dSDavid Gibson 414879e07936SIgor Mammedov static int64_t spapr_get_default_cpu_node_id(const MachineState *ms, int idx) 414979e07936SIgor Mammedov { 4150aa570207STao Xu return idx / ms->smp.cores % ms->numa_state->num_nodes; 415179e07936SIgor Mammedov } 415279e07936SIgor Mammedov 4153535455fdSIgor Mammedov static const CPUArchIdList *spapr_possible_cpu_arch_ids(MachineState *machine) 4154535455fdSIgor Mammedov { 4155535455fdSIgor Mammedov int i; 4156fe6b6346SLike Xu unsigned int smp_threads = machine->smp.threads; 4157fe6b6346SLike Xu unsigned int smp_cpus = machine->smp.cpus; 4158d342eb76SIgor Mammedov const char *core_type; 4159fe6b6346SLike Xu int spapr_max_cores = machine->smp.max_cpus / smp_threads; 4160535455fdSIgor Mammedov MachineClass *mc = MACHINE_GET_CLASS(machine); 4161535455fdSIgor Mammedov 4162c5514d0eSIgor Mammedov if (!mc->has_hotpluggable_cpus) { 4163535455fdSIgor Mammedov spapr_max_cores = QEMU_ALIGN_UP(smp_cpus, smp_threads) / smp_threads; 4164535455fdSIgor Mammedov } 4165535455fdSIgor Mammedov if (machine->possible_cpus) { 4166535455fdSIgor Mammedov assert(machine->possible_cpus->len == spapr_max_cores); 4167535455fdSIgor Mammedov return machine->possible_cpus; 4168535455fdSIgor Mammedov } 4169535455fdSIgor Mammedov 4170d342eb76SIgor Mammedov core_type = spapr_get_cpu_core_type(machine->cpu_type); 4171d342eb76SIgor Mammedov if (!core_type) { 4172d342eb76SIgor Mammedov error_report("Unable to find sPAPR CPU Core definition"); 4173d342eb76SIgor Mammedov exit(1); 4174d342eb76SIgor Mammedov } 4175d342eb76SIgor Mammedov 4176535455fdSIgor Mammedov machine->possible_cpus = g_malloc0(sizeof(CPUArchIdList) + 4177535455fdSIgor Mammedov sizeof(CPUArchId) * spapr_max_cores); 4178535455fdSIgor Mammedov machine->possible_cpus->len = spapr_max_cores; 4179535455fdSIgor Mammedov for (i = 0; i < machine->possible_cpus->len; i++) { 4180535455fdSIgor Mammedov int core_id = i * smp_threads; 4181535455fdSIgor Mammedov 4182d342eb76SIgor Mammedov machine->possible_cpus->cpus[i].type = core_type; 4183f2d672c2SIgor Mammedov machine->possible_cpus->cpus[i].vcpus_count = smp_threads; 4184535455fdSIgor Mammedov machine->possible_cpus->cpus[i].arch_id = core_id; 4185535455fdSIgor Mammedov machine->possible_cpus->cpus[i].props.has_core_id = true; 4186535455fdSIgor Mammedov machine->possible_cpus->cpus[i].props.core_id = core_id; 4187535455fdSIgor Mammedov } 4188535455fdSIgor Mammedov return machine->possible_cpus; 4189535455fdSIgor Mammedov } 4190535455fdSIgor Mammedov 4191f5598c92SGreg Kurz static bool spapr_phb_placement(SpaprMachineState *spapr, uint32_t index, 4192daa23699SDavid Gibson uint64_t *buid, hwaddr *pio, 4193daa23699SDavid Gibson hwaddr *mmio32, hwaddr *mmio64, 4194ec132efaSAlexey Kardashevskiy unsigned n_dma, uint32_t *liobns, 4195ec132efaSAlexey Kardashevskiy hwaddr *nv2gpa, hwaddr *nv2atsd, Error **errp) 41966737d9adSDavid Gibson { 4197357d1e3bSDavid Gibson /* 4198357d1e3bSDavid Gibson * New-style PHB window placement. 4199357d1e3bSDavid Gibson * 4200357d1e3bSDavid Gibson * Goals: Gives large (1TiB), naturally aligned 64-bit MMIO window 4201357d1e3bSDavid Gibson * for each PHB, in addition to 2GiB 32-bit MMIO and 64kiB PIO 4202357d1e3bSDavid Gibson * windows. 4203357d1e3bSDavid Gibson * 4204357d1e3bSDavid Gibson * Some guest kernels can't work with MMIO windows above 1<<46 4205357d1e3bSDavid Gibson * (64TiB), so we place up to 31 PHBs in the area 32TiB..64TiB 4206357d1e3bSDavid Gibson * 4207357d1e3bSDavid Gibson * 32TiB..(33TiB+1984kiB) contains the 64kiB PIO windows for each 4208357d1e3bSDavid Gibson * PHB stacked together. (32TiB+2GiB)..(32TiB+64GiB) contains the 4209357d1e3bSDavid Gibson * 2GiB 32-bit MMIO windows for each PHB. Then 33..64TiB has the 4210357d1e3bSDavid Gibson * 1TiB 64-bit MMIO windows for each PHB. 4211357d1e3bSDavid Gibson */ 42126737d9adSDavid Gibson const uint64_t base_buid = 0x800000020000000ULL; 42136737d9adSDavid Gibson int i; 42146737d9adSDavid Gibson 4215357d1e3bSDavid Gibson /* Sanity check natural alignments */ 4216357d1e3bSDavid Gibson QEMU_BUILD_BUG_ON((SPAPR_PCI_BASE % SPAPR_PCI_MEM64_WIN_SIZE) != 0); 4217357d1e3bSDavid Gibson QEMU_BUILD_BUG_ON((SPAPR_PCI_LIMIT % SPAPR_PCI_MEM64_WIN_SIZE) != 0); 4218357d1e3bSDavid Gibson QEMU_BUILD_BUG_ON((SPAPR_PCI_MEM64_WIN_SIZE % SPAPR_PCI_MEM32_WIN_SIZE) != 0); 4219357d1e3bSDavid Gibson QEMU_BUILD_BUG_ON((SPAPR_PCI_MEM32_WIN_SIZE % SPAPR_PCI_IO_WIN_SIZE) != 0); 4220357d1e3bSDavid Gibson /* Sanity check bounds */ 422125e6a118SMichael S. Tsirkin QEMU_BUILD_BUG_ON((SPAPR_MAX_PHBS * SPAPR_PCI_IO_WIN_SIZE) > 422225e6a118SMichael S. Tsirkin SPAPR_PCI_MEM32_WIN_SIZE); 422325e6a118SMichael S. Tsirkin QEMU_BUILD_BUG_ON((SPAPR_MAX_PHBS * SPAPR_PCI_MEM32_WIN_SIZE) > 422425e6a118SMichael S. Tsirkin SPAPR_PCI_MEM64_WIN_SIZE); 42252efff1c0SDavid Gibson 422625e6a118SMichael S. Tsirkin if (index >= SPAPR_MAX_PHBS) { 422725e6a118SMichael S. Tsirkin error_setg(errp, "\"index\" for PAPR PHB is too large (max %llu)", 422825e6a118SMichael S. Tsirkin SPAPR_MAX_PHBS - 1); 4229f5598c92SGreg Kurz return false; 42306737d9adSDavid Gibson } 42316737d9adSDavid Gibson 42326737d9adSDavid Gibson *buid = base_buid + index; 42336737d9adSDavid Gibson for (i = 0; i < n_dma; ++i) { 42346737d9adSDavid Gibson liobns[i] = SPAPR_PCI_LIOBN(index, i); 42356737d9adSDavid Gibson } 42366737d9adSDavid Gibson 4237357d1e3bSDavid Gibson *pio = SPAPR_PCI_BASE + index * SPAPR_PCI_IO_WIN_SIZE; 4238357d1e3bSDavid Gibson *mmio32 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM32_WIN_SIZE; 4239357d1e3bSDavid Gibson *mmio64 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM64_WIN_SIZE; 4240ec132efaSAlexey Kardashevskiy 4241ec132efaSAlexey Kardashevskiy *nv2gpa = SPAPR_PCI_NV2RAM64_WIN_BASE + index * SPAPR_PCI_NV2RAM64_WIN_SIZE; 4242ec132efaSAlexey Kardashevskiy *nv2atsd = SPAPR_PCI_NV2ATSD_WIN_BASE + index * SPAPR_PCI_NV2ATSD_WIN_SIZE; 4243f5598c92SGreg Kurz return true; 42446737d9adSDavid Gibson } 42456737d9adSDavid Gibson 42467844e12bSCédric Le Goater static ICSState *spapr_ics_get(XICSFabric *dev, int irq) 42477844e12bSCédric Le Goater { 4248ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(dev); 42497844e12bSCédric Le Goater 42507844e12bSCédric Le Goater return ics_valid_irq(spapr->ics, irq) ? spapr->ics : NULL; 42517844e12bSCédric Le Goater } 42527844e12bSCédric Le Goater 42537844e12bSCédric Le Goater static void spapr_ics_resend(XICSFabric *dev) 42547844e12bSCédric Le Goater { 4255ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(dev); 42567844e12bSCédric Le Goater 42577844e12bSCédric Le Goater ics_resend(spapr->ics); 42587844e12bSCédric Le Goater } 42597844e12bSCédric Le Goater 426081210c20SSam Bobroff static ICPState *spapr_icp_get(XICSFabric *xi, int vcpu_id) 4261b2fc59aaSCédric Le Goater { 42622e886fb3SSam Bobroff PowerPCCPU *cpu = spapr_find_cpu(vcpu_id); 4263b2fc59aaSCédric Le Goater 4264a28b9a5aSCédric Le Goater return cpu ? spapr_cpu_state(cpu)->icp : NULL; 4265b2fc59aaSCédric Le Goater } 4266b2fc59aaSCédric Le Goater 42676449da45SCédric Le Goater static void spapr_pic_print_info(InterruptStatsProvider *obj, 42686449da45SCédric Le Goater Monitor *mon) 42696449da45SCédric Le Goater { 4270ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(obj); 42716449da45SCédric Le Goater 4272328d8eb2SDavid Gibson spapr_irq_print_info(spapr, mon); 4273f041d6afSGreg Kurz monitor_printf(mon, "irqchip: %s\n", 4274f041d6afSGreg Kurz kvm_irqchip_in_kernel() ? "in-kernel" : "emulated"); 42756449da45SCédric Le Goater } 42766449da45SCédric Le Goater 4277baa45b17SCédric Le Goater /* 4278baa45b17SCédric Le Goater * This is a XIVE only operation 4279baa45b17SCédric Le Goater */ 4280932de7aeSCédric Le Goater static int spapr_match_nvt(XiveFabric *xfb, uint8_t format, 4281932de7aeSCédric Le Goater uint8_t nvt_blk, uint32_t nvt_idx, 4282932de7aeSCédric Le Goater bool cam_ignore, uint8_t priority, 4283932de7aeSCédric Le Goater uint32_t logic_serv, XiveTCTXMatch *match) 4284932de7aeSCédric Le Goater { 4285932de7aeSCédric Le Goater SpaprMachineState *spapr = SPAPR_MACHINE(xfb); 4286baa45b17SCédric Le Goater XivePresenter *xptr = XIVE_PRESENTER(spapr->active_intc); 4287932de7aeSCédric Le Goater XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr); 4288932de7aeSCédric Le Goater int count; 4289932de7aeSCédric Le Goater 4290932de7aeSCédric Le Goater count = xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, cam_ignore, 4291932de7aeSCédric Le Goater priority, logic_serv, match); 4292932de7aeSCédric Le Goater if (count < 0) { 4293932de7aeSCédric Le Goater return count; 4294932de7aeSCédric Le Goater } 4295932de7aeSCédric Le Goater 4296932de7aeSCédric Le Goater /* 4297932de7aeSCédric Le Goater * When we implement the save and restore of the thread interrupt 4298932de7aeSCédric Le Goater * contexts in the enter/exit CPU handlers of the machine and the 4299932de7aeSCédric Le Goater * escalations in QEMU, we should be able to handle non dispatched 4300932de7aeSCédric Le Goater * vCPUs. 4301932de7aeSCédric Le Goater * 4302932de7aeSCédric Le Goater * Until this is done, the sPAPR machine should find at least one 4303932de7aeSCédric Le Goater * matching context always. 4304932de7aeSCédric Le Goater */ 4305932de7aeSCédric Le Goater if (count == 0) { 4306932de7aeSCédric Le Goater qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVT %x/%x is not dispatched\n", 4307932de7aeSCédric Le Goater nvt_blk, nvt_idx); 4308932de7aeSCédric Le Goater } 4309932de7aeSCédric Le Goater 4310932de7aeSCédric Le Goater return count; 4311932de7aeSCédric Le Goater } 4312932de7aeSCédric Le Goater 431314bb4486SGreg Kurz int spapr_get_vcpu_id(PowerPCCPU *cpu) 43142e886fb3SSam Bobroff { 4315b1a568c1SGreg Kurz return cpu->vcpu_id; 43162e886fb3SSam Bobroff } 43172e886fb3SSam Bobroff 4318cfdc5274SGreg Kurz bool spapr_set_vcpu_id(PowerPCCPU *cpu, int cpu_index, Error **errp) 4319648edb64SGreg Kurz { 4320ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine()); 4321fe6b6346SLike Xu MachineState *ms = MACHINE(spapr); 4322648edb64SGreg Kurz int vcpu_id; 4323648edb64SGreg Kurz 43245d0fb150SGreg Kurz vcpu_id = spapr_vcpu_id(spapr, cpu_index); 4325648edb64SGreg Kurz 4326648edb64SGreg Kurz if (kvm_enabled() && !kvm_vcpu_id_is_valid(vcpu_id)) { 4327648edb64SGreg Kurz error_setg(errp, "Can't create CPU with id %d in KVM", vcpu_id); 4328648edb64SGreg Kurz error_append_hint(errp, "Adjust the number of cpus to %d " 4329648edb64SGreg Kurz "or try to raise the number of threads per core\n", 4330fe6b6346SLike Xu vcpu_id * ms->smp.threads / spapr->vsmt); 4331cfdc5274SGreg Kurz return false; 4332648edb64SGreg Kurz } 4333648edb64SGreg Kurz 4334648edb64SGreg Kurz cpu->vcpu_id = vcpu_id; 4335cfdc5274SGreg Kurz return true; 4336648edb64SGreg Kurz } 4337648edb64SGreg Kurz 43382e886fb3SSam Bobroff PowerPCCPU *spapr_find_cpu(int vcpu_id) 43392e886fb3SSam Bobroff { 43402e886fb3SSam Bobroff CPUState *cs; 43412e886fb3SSam Bobroff 43422e886fb3SSam Bobroff CPU_FOREACH(cs) { 43432e886fb3SSam Bobroff PowerPCCPU *cpu = POWERPC_CPU(cs); 43442e886fb3SSam Bobroff 434514bb4486SGreg Kurz if (spapr_get_vcpu_id(cpu) == vcpu_id) { 43462e886fb3SSam Bobroff return cpu; 43472e886fb3SSam Bobroff } 43482e886fb3SSam Bobroff } 43492e886fb3SSam Bobroff 43502e886fb3SSam Bobroff return NULL; 43512e886fb3SSam Bobroff } 43522e886fb3SSam Bobroff 435303ef074cSNicholas Piggin static void spapr_cpu_exec_enter(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu) 435403ef074cSNicholas Piggin { 435503ef074cSNicholas Piggin SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu); 435603ef074cSNicholas Piggin 435703ef074cSNicholas Piggin /* These are only called by TCG, KVM maintains dispatch state */ 435803ef074cSNicholas Piggin 43593a6e6224SNicholas Piggin spapr_cpu->prod = false; 436003ef074cSNicholas Piggin if (spapr_cpu->vpa_addr) { 436103ef074cSNicholas Piggin CPUState *cs = CPU(cpu); 436203ef074cSNicholas Piggin uint32_t dispatch; 436303ef074cSNicholas Piggin 436403ef074cSNicholas Piggin dispatch = ldl_be_phys(cs->as, 436503ef074cSNicholas Piggin spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER); 436603ef074cSNicholas Piggin dispatch++; 436703ef074cSNicholas Piggin if ((dispatch & 1) != 0) { 436803ef074cSNicholas Piggin qemu_log_mask(LOG_GUEST_ERROR, 436903ef074cSNicholas Piggin "VPA: incorrect dispatch counter value for " 437003ef074cSNicholas Piggin "dispatched partition %u, correcting.\n", dispatch); 437103ef074cSNicholas Piggin dispatch++; 437203ef074cSNicholas Piggin } 437303ef074cSNicholas Piggin stl_be_phys(cs->as, 437403ef074cSNicholas Piggin spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER, dispatch); 437503ef074cSNicholas Piggin } 437603ef074cSNicholas Piggin } 437703ef074cSNicholas Piggin 437803ef074cSNicholas Piggin static void spapr_cpu_exec_exit(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu) 437903ef074cSNicholas Piggin { 438003ef074cSNicholas Piggin SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu); 438103ef074cSNicholas Piggin 438203ef074cSNicholas Piggin if (spapr_cpu->vpa_addr) { 438303ef074cSNicholas Piggin CPUState *cs = CPU(cpu); 438403ef074cSNicholas Piggin uint32_t dispatch; 438503ef074cSNicholas Piggin 438603ef074cSNicholas Piggin dispatch = ldl_be_phys(cs->as, 438703ef074cSNicholas Piggin spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER); 438803ef074cSNicholas Piggin dispatch++; 438903ef074cSNicholas Piggin if ((dispatch & 1) != 1) { 439003ef074cSNicholas Piggin qemu_log_mask(LOG_GUEST_ERROR, 439103ef074cSNicholas Piggin "VPA: incorrect dispatch counter value for " 439203ef074cSNicholas Piggin "preempted partition %u, correcting.\n", dispatch); 439303ef074cSNicholas Piggin dispatch++; 439403ef074cSNicholas Piggin } 439503ef074cSNicholas Piggin stl_be_phys(cs->as, 439603ef074cSNicholas Piggin spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER, dispatch); 439703ef074cSNicholas Piggin } 439803ef074cSNicholas Piggin } 439903ef074cSNicholas Piggin 440029ee3247SAlexey Kardashevskiy static void spapr_machine_class_init(ObjectClass *oc, void *data) 440153018216SPaolo Bonzini { 440229ee3247SAlexey Kardashevskiy MachineClass *mc = MACHINE_CLASS(oc); 4403ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(oc); 440471461b0fSAlexey Kardashevskiy FWPathProviderClass *fwc = FW_PATH_PROVIDER_CLASS(oc); 440534316482SAlexey Kardashevskiy NMIClass *nc = NMI_CLASS(oc); 4406c20d332aSBharata B Rao HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc); 44071d1be34dSDavid Gibson PPCVirtualHypervisorClass *vhc = PPC_VIRTUAL_HYPERVISOR_CLASS(oc); 44087844e12bSCédric Le Goater XICSFabricClass *xic = XICS_FABRIC_CLASS(oc); 44096449da45SCédric Le Goater InterruptStatsProviderClass *ispc = INTERRUPT_STATS_PROVIDER_CLASS(oc); 4410932de7aeSCédric Le Goater XiveFabricClass *xfc = XIVE_FABRIC_CLASS(oc); 441129ee3247SAlexey Kardashevskiy 44120eb9054cSDavid Gibson mc->desc = "pSeries Logical Partition (PAPR compliant)"; 4413907aac2fSMark Cave-Ayland mc->ignore_boot_device_suffixes = true; 4414fc9f38c3SDavid Gibson 4415fc9f38c3SDavid Gibson /* 4416fc9f38c3SDavid Gibson * We set up the default / latest behaviour here. The class_init 4417fc9f38c3SDavid Gibson * functions for the specific versioned machine types can override 4418fc9f38c3SDavid Gibson * these details for backwards compatibility 4419fc9f38c3SDavid Gibson */ 4420bcb5ce08SDavid Gibson mc->init = spapr_machine_init; 4421bcb5ce08SDavid Gibson mc->reset = spapr_machine_reset; 4422958db90cSMarcel Apfelbaum mc->block_default_type = IF_SCSI; 44236244bb7eSGreg Kurz mc->max_cpus = 1024; 4424958db90cSMarcel Apfelbaum mc->no_parallel = 1; 44255b2128d2SAlexander Graf mc->default_boot_order = ""; 4426d23b6caaSPhilippe Mathieu-Daudé mc->default_ram_size = 512 * MiB; 4427ab74e543SIgor Mammedov mc->default_ram_id = "ppc_spapr.ram"; 442829f9cef3SSebastian Bauer mc->default_display = "std"; 4429958db90cSMarcel Apfelbaum mc->kvm_type = spapr_kvm_type; 44307da79a16SEduardo Habkost machine_class_allow_dynamic_sysbus_dev(mc, TYPE_SPAPR_PCI_HOST_BRIDGE); 4431e4024630SLaurent Vivier mc->pci_allow_0_address = true; 4432debbdc00SIgor Mammedov assert(!mc->get_hotplug_handler); 44337ebaf795SBharata B Rao mc->get_hotplug_handler = spapr_get_hotplug_handler; 443494a94e4cSBharata B Rao hc->pre_plug = spapr_machine_device_pre_plug; 4435c20d332aSBharata B Rao hc->plug = spapr_machine_device_plug; 4436ea089eebSIgor Mammedov mc->cpu_index_to_instance_props = spapr_cpu_index_to_props; 443779e07936SIgor Mammedov mc->get_default_cpu_node_id = spapr_get_default_cpu_node_id; 4438535455fdSIgor Mammedov mc->possible_cpu_arch_ids = spapr_possible_cpu_arch_ids; 4439cf632463SBharata B Rao hc->unplug_request = spapr_machine_device_unplug_request; 444088432f44SDavid Hildenbrand hc->unplug = spapr_machine_device_unplug; 444100b4fbe2SMarcel Apfelbaum 4442fc9f38c3SDavid Gibson smc->dr_lmb_enabled = true; 4443fea35ca4SAlexey Kardashevskiy smc->update_dt_enabled = true; 444434a6b015SCédric Le Goater mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power9_v2.0"); 4445c5514d0eSIgor Mammedov mc->has_hotpluggable_cpus = true; 4446ee3a71e3SShivaprasad G Bhat mc->nvdimm_supported = true; 444752b81ab5SDavid Gibson smc->resize_hpt_default = SPAPR_RESIZE_HPT_ENABLED; 444871461b0fSAlexey Kardashevskiy fwc->get_dev_path = spapr_get_fw_dev_path; 444934316482SAlexey Kardashevskiy nc->nmi_monitor_handler = spapr_nmi; 44506737d9adSDavid Gibson smc->phb_placement = spapr_phb_placement; 44511d1be34dSDavid Gibson vhc->hypercall = emulate_spapr_hypercall; 4452e57ca75cSDavid Gibson vhc->hpt_mask = spapr_hpt_mask; 4453e57ca75cSDavid Gibson vhc->map_hptes = spapr_map_hptes; 4454e57ca75cSDavid Gibson vhc->unmap_hptes = spapr_unmap_hptes; 4455a2dd4e83SBenjamin Herrenschmidt vhc->hpte_set_c = spapr_hpte_set_c; 4456a2dd4e83SBenjamin Herrenschmidt vhc->hpte_set_r = spapr_hpte_set_r; 445779825f4dSBenjamin Herrenschmidt vhc->get_pate = spapr_get_pate; 44581ec26c75SGreg Kurz vhc->encode_hpt_for_kvm_pr = spapr_encode_hpt_for_kvm_pr; 445903ef074cSNicholas Piggin vhc->cpu_exec_enter = spapr_cpu_exec_enter; 446003ef074cSNicholas Piggin vhc->cpu_exec_exit = spapr_cpu_exec_exit; 44617844e12bSCédric Le Goater xic->ics_get = spapr_ics_get; 44627844e12bSCédric Le Goater xic->ics_resend = spapr_ics_resend; 4463b2fc59aaSCédric Le Goater xic->icp_get = spapr_icp_get; 44646449da45SCédric Le Goater ispc->print_info = spapr_pic_print_info; 446555641213SLaurent Vivier /* Force NUMA node memory size to be a multiple of 446655641213SLaurent Vivier * SPAPR_MEMORY_BLOCK_SIZE (256M) since that's the granularity 446755641213SLaurent Vivier * in which LMBs are represented and hot-added 446855641213SLaurent Vivier */ 446955641213SLaurent Vivier mc->numa_mem_align_shift = 28; 44700533ef5fSTao Xu mc->auto_enable_numa = true; 447133face6bSDavid Gibson 44724e5fe368SSuraj Jitindar Singh smc->default_caps.caps[SPAPR_CAP_HTM] = SPAPR_CAP_OFF; 44734e5fe368SSuraj Jitindar Singh smc->default_caps.caps[SPAPR_CAP_VSX] = SPAPR_CAP_ON; 44744e5fe368SSuraj Jitindar Singh smc->default_caps.caps[SPAPR_CAP_DFP] = SPAPR_CAP_ON; 44752782ad4cSSuraj Jitindar Singh smc->default_caps.caps[SPAPR_CAP_CFPC] = SPAPR_CAP_WORKAROUND; 44762782ad4cSSuraj Jitindar Singh smc->default_caps.caps[SPAPR_CAP_SBBC] = SPAPR_CAP_WORKAROUND; 44772782ad4cSSuraj Jitindar Singh smc->default_caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_WORKAROUND; 44782309832aSDavid Gibson smc->default_caps.caps[SPAPR_CAP_HPT_MAXPAGESIZE] = 16; /* 64kiB */ 4479b9a477b7SSuraj Jitindar Singh smc->default_caps.caps[SPAPR_CAP_NESTED_KVM_HV] = SPAPR_CAP_OFF; 4480edaa7995SSuraj Jitindar Singh smc->default_caps.caps[SPAPR_CAP_LARGE_DECREMENTER] = SPAPR_CAP_ON; 448137965dfeSDavid Gibson smc->default_caps.caps[SPAPR_CAP_CCF_ASSIST] = SPAPR_CAP_ON; 44828af7e1feSNicholas Piggin smc->default_caps.caps[SPAPR_CAP_FWNMI] = SPAPR_CAP_ON; 448340c2281cSMarkus Armbruster spapr_caps_add_properties(smc); 4484bd94bc06SCédric Le Goater smc->irq = &spapr_irq_dual; 4485dae5e39aSMichael Roth smc->dr_phb_enabled = true; 44866c3829a2SAlexey Kardashevskiy smc->linux_pci_probe = true; 448729cb4187SGreg Kurz smc->smp_threads_vsmt = true; 448854255c1fSDavid Gibson smc->nr_xirqs = SPAPR_NR_XIRQS; 4489932de7aeSCédric Le Goater xfc->match_nvt = spapr_match_nvt; 449053018216SPaolo Bonzini } 449153018216SPaolo Bonzini 449229ee3247SAlexey Kardashevskiy static const TypeInfo spapr_machine_info = { 449329ee3247SAlexey Kardashevskiy .name = TYPE_SPAPR_MACHINE, 449429ee3247SAlexey Kardashevskiy .parent = TYPE_MACHINE, 44954aee7362SDavid Gibson .abstract = true, 4496ce2918cbSDavid Gibson .instance_size = sizeof(SpaprMachineState), 4497bcb5ce08SDavid Gibson .instance_init = spapr_instance_init, 449887bbdd9cSDavid Gibson .instance_finalize = spapr_machine_finalizefn, 4499ce2918cbSDavid Gibson .class_size = sizeof(SpaprMachineClass), 450029ee3247SAlexey Kardashevskiy .class_init = spapr_machine_class_init, 450171461b0fSAlexey Kardashevskiy .interfaces = (InterfaceInfo[]) { 450271461b0fSAlexey Kardashevskiy { TYPE_FW_PATH_PROVIDER }, 450334316482SAlexey Kardashevskiy { TYPE_NMI }, 4504c20d332aSBharata B Rao { TYPE_HOTPLUG_HANDLER }, 45051d1be34dSDavid Gibson { TYPE_PPC_VIRTUAL_HYPERVISOR }, 45067844e12bSCédric Le Goater { TYPE_XICS_FABRIC }, 45076449da45SCédric Le Goater { TYPE_INTERRUPT_STATS_PROVIDER }, 4508932de7aeSCédric Le Goater { TYPE_XIVE_FABRIC }, 450971461b0fSAlexey Kardashevskiy { } 451071461b0fSAlexey Kardashevskiy }, 451129ee3247SAlexey Kardashevskiy }; 451229ee3247SAlexey Kardashevskiy 4513a7849268SMichael S. Tsirkin static void spapr_machine_latest_class_options(MachineClass *mc) 4514a7849268SMichael S. Tsirkin { 4515a7849268SMichael S. Tsirkin mc->alias = "pseries"; 4516ea0ac7f6SPhilippe Mathieu-Daudé mc->is_default = true; 4517a7849268SMichael S. Tsirkin } 4518a7849268SMichael S. Tsirkin 4519fccbc785SDavid Gibson #define DEFINE_SPAPR_MACHINE(suffix, verstr, latest) \ 45205013c547SDavid Gibson static void spapr_machine_##suffix##_class_init(ObjectClass *oc, \ 45215013c547SDavid Gibson void *data) \ 45225013c547SDavid Gibson { \ 45235013c547SDavid Gibson MachineClass *mc = MACHINE_CLASS(oc); \ 45245013c547SDavid Gibson spapr_machine_##suffix##_class_options(mc); \ 4525fccbc785SDavid Gibson if (latest) { \ 4526a7849268SMichael S. Tsirkin spapr_machine_latest_class_options(mc); \ 4527fccbc785SDavid Gibson } \ 45285013c547SDavid Gibson } \ 45295013c547SDavid Gibson static const TypeInfo spapr_machine_##suffix##_info = { \ 45305013c547SDavid Gibson .name = MACHINE_TYPE_NAME("pseries-" verstr), \ 45315013c547SDavid Gibson .parent = TYPE_SPAPR_MACHINE, \ 45325013c547SDavid Gibson .class_init = spapr_machine_##suffix##_class_init, \ 45335013c547SDavid Gibson }; \ 45345013c547SDavid Gibson static void spapr_machine_register_##suffix(void) \ 45355013c547SDavid Gibson { \ 45365013c547SDavid Gibson type_register(&spapr_machine_##suffix##_info); \ 45375013c547SDavid Gibson } \ 45380e6aac87SEduardo Habkost type_init(spapr_machine_register_##suffix) 45395013c547SDavid Gibson 45401c5f29bbSDavid Gibson /* 4541576a00bdSCornelia Huck * pseries-6.0 45423eb74d20SCornelia Huck */ 4543576a00bdSCornelia Huck static void spapr_machine_6_0_class_options(MachineClass *mc) 45443eb74d20SCornelia Huck { 45453eb74d20SCornelia Huck /* Defaults for the latest behaviour inherited from the base class */ 45463eb74d20SCornelia Huck } 45473eb74d20SCornelia Huck 4548576a00bdSCornelia Huck DEFINE_SPAPR_MACHINE(6_0, "6.0", true); 4549576a00bdSCornelia Huck 4550576a00bdSCornelia Huck /* 4551576a00bdSCornelia Huck * pseries-5.2 4552576a00bdSCornelia Huck */ 4553576a00bdSCornelia Huck static void spapr_machine_5_2_class_options(MachineClass *mc) 4554576a00bdSCornelia Huck { 4555576a00bdSCornelia Huck spapr_machine_6_0_class_options(mc); 4556576a00bdSCornelia Huck compat_props_add(mc->compat_props, hw_compat_5_2, hw_compat_5_2_len); 4557576a00bdSCornelia Huck } 4558576a00bdSCornelia Huck 4559576a00bdSCornelia Huck DEFINE_SPAPR_MACHINE(5_2, "5.2", false); 45603ff3c5d3SCornelia Huck 45613ff3c5d3SCornelia Huck /* 45623ff3c5d3SCornelia Huck * pseries-5.1 45633ff3c5d3SCornelia Huck */ 45643ff3c5d3SCornelia Huck static void spapr_machine_5_1_class_options(MachineClass *mc) 45653ff3c5d3SCornelia Huck { 456629bfe52aSDaniel Henrique Barboza SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 456729bfe52aSDaniel Henrique Barboza 45683ff3c5d3SCornelia Huck spapr_machine_5_2_class_options(mc); 45693ff3c5d3SCornelia Huck compat_props_add(mc->compat_props, hw_compat_5_1, hw_compat_5_1_len); 457029bfe52aSDaniel Henrique Barboza smc->pre_5_2_numa_associativity = true; 45713ff3c5d3SCornelia Huck } 45723ff3c5d3SCornelia Huck 45733ff3c5d3SCornelia Huck DEFINE_SPAPR_MACHINE(5_1, "5.1", false); 4574541aaa1dSCornelia Huck 4575541aaa1dSCornelia Huck /* 4576541aaa1dSCornelia Huck * pseries-5.0 4577541aaa1dSCornelia Huck */ 4578541aaa1dSCornelia Huck static void spapr_machine_5_0_class_options(MachineClass *mc) 4579541aaa1dSCornelia Huck { 4580a6030d7eSReza Arbab SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 4581a6030d7eSReza Arbab static GlobalProperty compat[] = { 4582a6030d7eSReza Arbab { TYPE_SPAPR_PCI_HOST_BRIDGE, "pre-5.1-associativity", "on" }, 4583a6030d7eSReza Arbab }; 4584a6030d7eSReza Arbab 4585541aaa1dSCornelia Huck spapr_machine_5_1_class_options(mc); 4586541aaa1dSCornelia Huck compat_props_add(mc->compat_props, hw_compat_5_0, hw_compat_5_0_len); 4587a6030d7eSReza Arbab compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); 458832a354dcSIgor Mammedov mc->numa_mem_supported = true; 4589a6030d7eSReza Arbab smc->pre_5_1_assoc_refpoints = true; 4590541aaa1dSCornelia Huck } 4591541aaa1dSCornelia Huck 4592541aaa1dSCornelia Huck DEFINE_SPAPR_MACHINE(5_0, "5.0", false); 45933eb74d20SCornelia Huck 45943eb74d20SCornelia Huck /* 45959aec2e52SCornelia Huck * pseries-4.2 4596e2676b16SGreg Kurz */ 45979aec2e52SCornelia Huck static void spapr_machine_4_2_class_options(MachineClass *mc) 4598e2676b16SGreg Kurz { 459937965dfeSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 460037965dfeSDavid Gibson 46013eb74d20SCornelia Huck spapr_machine_5_0_class_options(mc); 46025f258577SEvgeny Yakovlev compat_props_add(mc->compat_props, hw_compat_4_2, hw_compat_4_2_len); 460337965dfeSDavid Gibson smc->default_caps.caps[SPAPR_CAP_CCF_ASSIST] = SPAPR_CAP_OFF; 46048af7e1feSNicholas Piggin smc->default_caps.caps[SPAPR_CAP_FWNMI] = SPAPR_CAP_OFF; 46051052ab67SDavid Gibson smc->rma_limit = 16 * GiB; 4606ee3a71e3SShivaprasad G Bhat mc->nvdimm_supported = false; 4607e2676b16SGreg Kurz } 4608e2676b16SGreg Kurz 46093eb74d20SCornelia Huck DEFINE_SPAPR_MACHINE(4_2, "4.2", false); 46109aec2e52SCornelia Huck 46119aec2e52SCornelia Huck /* 46129aec2e52SCornelia Huck * pseries-4.1 46139aec2e52SCornelia Huck */ 46149aec2e52SCornelia Huck static void spapr_machine_4_1_class_options(MachineClass *mc) 46159aec2e52SCornelia Huck { 46166c3829a2SAlexey Kardashevskiy SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 4617d15d4ad6SDavid Gibson static GlobalProperty compat[] = { 4618d15d4ad6SDavid Gibson /* Only allow 4kiB and 64kiB IOMMU pagesizes */ 4619d15d4ad6SDavid Gibson { TYPE_SPAPR_PCI_HOST_BRIDGE, "pgsz", "0x11000" }, 4620d15d4ad6SDavid Gibson }; 4621d15d4ad6SDavid Gibson 46229aec2e52SCornelia Huck spapr_machine_4_2_class_options(mc); 46236c3829a2SAlexey Kardashevskiy smc->linux_pci_probe = false; 462429cb4187SGreg Kurz smc->smp_threads_vsmt = false; 46259aec2e52SCornelia Huck compat_props_add(mc->compat_props, hw_compat_4_1, hw_compat_4_1_len); 4626d15d4ad6SDavid Gibson compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); 46279aec2e52SCornelia Huck } 46289aec2e52SCornelia Huck 46299aec2e52SCornelia Huck DEFINE_SPAPR_MACHINE(4_1, "4.1", false); 46309bf2650bSCornelia Huck 46319bf2650bSCornelia Huck /* 46329bf2650bSCornelia Huck * pseries-4.0 46339bf2650bSCornelia Huck */ 4634f5598c92SGreg Kurz static bool phb_placement_4_0(SpaprMachineState *spapr, uint32_t index, 4635ec132efaSAlexey Kardashevskiy uint64_t *buid, hwaddr *pio, 4636ec132efaSAlexey Kardashevskiy hwaddr *mmio32, hwaddr *mmio64, 4637ec132efaSAlexey Kardashevskiy unsigned n_dma, uint32_t *liobns, 4638ec132efaSAlexey Kardashevskiy hwaddr *nv2gpa, hwaddr *nv2atsd, Error **errp) 4639ec132efaSAlexey Kardashevskiy { 4640f5598c92SGreg Kurz if (!spapr_phb_placement(spapr, index, buid, pio, mmio32, mmio64, n_dma, 4641f5598c92SGreg Kurz liobns, nv2gpa, nv2atsd, errp)) { 4642f5598c92SGreg Kurz return false; 4643ec132efaSAlexey Kardashevskiy } 4644ec132efaSAlexey Kardashevskiy 4645f5598c92SGreg Kurz *nv2gpa = 0; 4646f5598c92SGreg Kurz *nv2atsd = 0; 4647f5598c92SGreg Kurz return true; 4648f5598c92SGreg Kurz } 4649eb3cba82SDavid Gibson static void spapr_machine_4_0_class_options(MachineClass *mc) 4650eb3cba82SDavid Gibson { 4651eb3cba82SDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 4652eb3cba82SDavid Gibson 4653eb3cba82SDavid Gibson spapr_machine_4_1_class_options(mc); 4654eb3cba82SDavid Gibson compat_props_add(mc->compat_props, hw_compat_4_0, hw_compat_4_0_len); 4655eb3cba82SDavid Gibson smc->phb_placement = phb_placement_4_0; 4656bd94bc06SCédric Le Goater smc->irq = &spapr_irq_xics; 46573725ef1aSGreg Kurz smc->pre_4_1_migration = true; 4658eb3cba82SDavid Gibson } 4659eb3cba82SDavid Gibson 4660eb3cba82SDavid Gibson DEFINE_SPAPR_MACHINE(4_0, "4.0", false); 4661eb3cba82SDavid Gibson 4662eb3cba82SDavid Gibson /* 4663eb3cba82SDavid Gibson * pseries-3.1 4664eb3cba82SDavid Gibson */ 466588cbe073SMarc-André Lureau static void spapr_machine_3_1_class_options(MachineClass *mc) 466688cbe073SMarc-André Lureau { 4667ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 4668fea35ca4SAlexey Kardashevskiy 466984e060bfSAlex Williamson spapr_machine_4_0_class_options(mc); 4670abd93cc7SMarc-André Lureau compat_props_add(mc->compat_props, hw_compat_3_1, hw_compat_3_1_len); 467127461d69SPrasad J Pandit 467234a6b015SCédric Le Goater mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power8_v2.0"); 4673fea35ca4SAlexey Kardashevskiy smc->update_dt_enabled = false; 4674dae5e39aSMichael Roth smc->dr_phb_enabled = false; 46750a794529SDavid Gibson smc->broken_host_serial_model = true; 46762782ad4cSSuraj Jitindar Singh smc->default_caps.caps[SPAPR_CAP_CFPC] = SPAPR_CAP_BROKEN; 46772782ad4cSSuraj Jitindar Singh smc->default_caps.caps[SPAPR_CAP_SBBC] = SPAPR_CAP_BROKEN; 46782782ad4cSSuraj Jitindar Singh smc->default_caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_BROKEN; 4679edaa7995SSuraj Jitindar Singh smc->default_caps.caps[SPAPR_CAP_LARGE_DECREMENTER] = SPAPR_CAP_OFF; 468084e060bfSAlex Williamson } 468184e060bfSAlex Williamson 468284e060bfSAlex Williamson DEFINE_SPAPR_MACHINE(3_1, "3.1", false); 4683d45360d9SCédric Le Goater 4684d45360d9SCédric Le Goater /* 4685d45360d9SCédric Le Goater * pseries-3.0 4686d45360d9SCédric Le Goater */ 4687d45360d9SCédric Le Goater 4688d45360d9SCédric Le Goater static void spapr_machine_3_0_class_options(MachineClass *mc) 4689d45360d9SCédric Le Goater { 4690ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 469182cffa2eSCédric Le Goater 4692d45360d9SCédric Le Goater spapr_machine_3_1_class_options(mc); 4693ddb3235dSMarc-André Lureau compat_props_add(mc->compat_props, hw_compat_3_0, hw_compat_3_0_len); 469482cffa2eSCédric Le Goater 469582cffa2eSCédric Le Goater smc->legacy_irq_allocation = true; 469654255c1fSDavid Gibson smc->nr_xirqs = 0x400; 4697ae837402SCédric Le Goater smc->irq = &spapr_irq_xics_legacy; 4698d45360d9SCédric Le Goater } 4699d45360d9SCédric Le Goater 4700d45360d9SCédric Le Goater DEFINE_SPAPR_MACHINE(3_0, "3.0", false); 47018a4fd427SDavid Gibson 47028a4fd427SDavid Gibson /* 47038a4fd427SDavid Gibson * pseries-2.12 47048a4fd427SDavid Gibson */ 470588cbe073SMarc-André Lureau static void spapr_machine_2_12_class_options(MachineClass *mc) 470688cbe073SMarc-André Lureau { 4707ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 470888cbe073SMarc-André Lureau static GlobalProperty compat[] = { 47096c36bddfSEduardo Habkost { TYPE_POWERPC_CPU, "pre-3.0-migration", "on" }, 47106c36bddfSEduardo Habkost { TYPE_SPAPR_CPU_CORE, "pre-3.0-migration", "on" }, 4711fa386d98SMarc-André Lureau }; 47128a4fd427SDavid Gibson 4713d8c0c7afSPeter Maydell spapr_machine_3_0_class_options(mc); 47140d47310bSMarc-André Lureau compat_props_add(mc->compat_props, hw_compat_2_12, hw_compat_2_12_len); 471588cbe073SMarc-André Lureau compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); 47162309832aSDavid Gibson 4717e8937295SGreg Kurz /* We depend on kvm_enabled() to choose a default value for the 4718e8937295SGreg Kurz * hpt-max-page-size capability. Of course we can't do it here 4719e8937295SGreg Kurz * because this is too early and the HW accelerator isn't initialzed 4720e8937295SGreg Kurz * yet. Postpone this to machine init (see default_caps_with_cpu()). 4721e8937295SGreg Kurz */ 4722e8937295SGreg Kurz smc->default_caps.caps[SPAPR_CAP_HPT_MAXPAGESIZE] = 0; 47238a4fd427SDavid Gibson } 47248a4fd427SDavid Gibson 47258a4fd427SDavid Gibson DEFINE_SPAPR_MACHINE(2_12, "2.12", false); 47262b615412SDavid Gibson 4727813f3cf6SSuraj Jitindar Singh static void spapr_machine_2_12_sxxm_class_options(MachineClass *mc) 4728813f3cf6SSuraj Jitindar Singh { 4729ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 4730813f3cf6SSuraj Jitindar Singh 4731813f3cf6SSuraj Jitindar Singh spapr_machine_2_12_class_options(mc); 4732813f3cf6SSuraj Jitindar Singh smc->default_caps.caps[SPAPR_CAP_CFPC] = SPAPR_CAP_WORKAROUND; 4733813f3cf6SSuraj Jitindar Singh smc->default_caps.caps[SPAPR_CAP_SBBC] = SPAPR_CAP_WORKAROUND; 4734813f3cf6SSuraj Jitindar Singh smc->default_caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_FIXED_CCD; 4735813f3cf6SSuraj Jitindar Singh } 4736813f3cf6SSuraj Jitindar Singh 4737813f3cf6SSuraj Jitindar Singh DEFINE_SPAPR_MACHINE(2_12_sxxm, "2.12-sxxm", false); 4738813f3cf6SSuraj Jitindar Singh 47392b615412SDavid Gibson /* 47402b615412SDavid Gibson * pseries-2.11 47412b615412SDavid Gibson */ 47422b615412SDavid Gibson 47432b615412SDavid Gibson static void spapr_machine_2_11_class_options(MachineClass *mc) 47442b615412SDavid Gibson { 4745ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 4746ee76a09fSDavid Gibson 47472b615412SDavid Gibson spapr_machine_2_12_class_options(mc); 47484e5fe368SSuraj Jitindar Singh smc->default_caps.caps[SPAPR_CAP_HTM] = SPAPR_CAP_ON; 474943df70a9SMarc-André Lureau compat_props_add(mc->compat_props, hw_compat_2_11, hw_compat_2_11_len); 47502b615412SDavid Gibson } 47512b615412SDavid Gibson 47522b615412SDavid Gibson DEFINE_SPAPR_MACHINE(2_11, "2.11", false); 4753e2676b16SGreg Kurz 4754e2676b16SGreg Kurz /* 47553fa14fbeSDavid Gibson * pseries-2.10 4756db800b21SDavid Gibson */ 4757e2676b16SGreg Kurz 47583fa14fbeSDavid Gibson static void spapr_machine_2_10_class_options(MachineClass *mc) 4759db800b21SDavid Gibson { 4760e2676b16SGreg Kurz spapr_machine_2_11_class_options(mc); 4761503224f4SMarc-André Lureau compat_props_add(mc->compat_props, hw_compat_2_10, hw_compat_2_10_len); 4762db800b21SDavid Gibson } 4763db800b21SDavid Gibson 4764e2676b16SGreg Kurz DEFINE_SPAPR_MACHINE(2_10, "2.10", false); 47653fa14fbeSDavid Gibson 47663fa14fbeSDavid Gibson /* 47673fa14fbeSDavid Gibson * pseries-2.9 47683fa14fbeSDavid Gibson */ 476988cbe073SMarc-André Lureau 477088cbe073SMarc-André Lureau static void spapr_machine_2_9_class_options(MachineClass *mc) 477188cbe073SMarc-André Lureau { 4772ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 477388cbe073SMarc-André Lureau static GlobalProperty compat[] = { 47746c36bddfSEduardo Habkost { TYPE_POWERPC_CPU, "pre-2.10-migration", "on" }, 4775fa386d98SMarc-André Lureau }; 47763fa14fbeSDavid Gibson 47773fa14fbeSDavid Gibson spapr_machine_2_10_class_options(mc); 47783e803152SMarc-André Lureau compat_props_add(mc->compat_props, hw_compat_2_9, hw_compat_2_9_len); 477988cbe073SMarc-André Lureau compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); 478046f7afa3SGreg Kurz smc->pre_2_10_has_unused_icps = true; 478152b81ab5SDavid Gibson smc->resize_hpt_default = SPAPR_RESIZE_HPT_DISABLED; 47823fa14fbeSDavid Gibson } 47833fa14fbeSDavid Gibson 47843fa14fbeSDavid Gibson DEFINE_SPAPR_MACHINE(2_9, "2.9", false); 4785fa325e6cSDavid Gibson 4786fa325e6cSDavid Gibson /* 4787fa325e6cSDavid Gibson * pseries-2.8 4788fa325e6cSDavid Gibson */ 478988cbe073SMarc-André Lureau 479088cbe073SMarc-André Lureau static void spapr_machine_2_8_class_options(MachineClass *mc) 479188cbe073SMarc-André Lureau { 479288cbe073SMarc-André Lureau static GlobalProperty compat[] = { 47936c36bddfSEduardo Habkost { TYPE_SPAPR_PCI_HOST_BRIDGE, "pcie-extended-configuration-space", "off" }, 4794fa386d98SMarc-André Lureau }; 4795fa325e6cSDavid Gibson 4796fa325e6cSDavid Gibson spapr_machine_2_9_class_options(mc); 4797edc24ccdSMarc-André Lureau compat_props_add(mc->compat_props, hw_compat_2_8, hw_compat_2_8_len); 479888cbe073SMarc-André Lureau compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); 479955641213SLaurent Vivier mc->numa_mem_align_shift = 23; 4800fa325e6cSDavid Gibson } 4801fa325e6cSDavid Gibson 4802fa325e6cSDavid Gibson DEFINE_SPAPR_MACHINE(2_8, "2.8", false); 4803db800b21SDavid Gibson 4804db800b21SDavid Gibson /* 48051ea1eefcSBharata B Rao * pseries-2.7 48061ea1eefcSBharata B Rao */ 4807357d1e3bSDavid Gibson 4808f5598c92SGreg Kurz static bool phb_placement_2_7(SpaprMachineState *spapr, uint32_t index, 4809357d1e3bSDavid Gibson uint64_t *buid, hwaddr *pio, 4810357d1e3bSDavid Gibson hwaddr *mmio32, hwaddr *mmio64, 4811ec132efaSAlexey Kardashevskiy unsigned n_dma, uint32_t *liobns, 4812ec132efaSAlexey Kardashevskiy hwaddr *nv2gpa, hwaddr *nv2atsd, Error **errp) 4813357d1e3bSDavid Gibson { 4814357d1e3bSDavid Gibson /* Legacy PHB placement for pseries-2.7 and earlier machine types */ 4815357d1e3bSDavid Gibson const uint64_t base_buid = 0x800000020000000ULL; 4816357d1e3bSDavid Gibson const hwaddr phb_spacing = 0x1000000000ULL; /* 64 GiB */ 4817357d1e3bSDavid Gibson const hwaddr mmio_offset = 0xa0000000; /* 2 GiB + 512 MiB */ 4818357d1e3bSDavid Gibson const hwaddr pio_offset = 0x80000000; /* 2 GiB */ 4819357d1e3bSDavid Gibson const uint32_t max_index = 255; 4820357d1e3bSDavid Gibson const hwaddr phb0_alignment = 0x10000000000ULL; /* 1 TiB */ 4821357d1e3bSDavid Gibson 4822357d1e3bSDavid Gibson uint64_t ram_top = MACHINE(spapr)->ram_size; 4823357d1e3bSDavid Gibson hwaddr phb0_base, phb_base; 4824357d1e3bSDavid Gibson int i; 4825357d1e3bSDavid Gibson 48260c9269a5SDavid Hildenbrand /* Do we have device memory? */ 4827357d1e3bSDavid Gibson if (MACHINE(spapr)->maxram_size > ram_top) { 4828357d1e3bSDavid Gibson /* Can't just use maxram_size, because there may be an 48290c9269a5SDavid Hildenbrand * alignment gap between normal and device memory regions 48300c9269a5SDavid Hildenbrand */ 4831b0c14ec4SDavid Hildenbrand ram_top = MACHINE(spapr)->device_memory->base + 4832b0c14ec4SDavid Hildenbrand memory_region_size(&MACHINE(spapr)->device_memory->mr); 4833357d1e3bSDavid Gibson } 4834357d1e3bSDavid Gibson 4835357d1e3bSDavid Gibson phb0_base = QEMU_ALIGN_UP(ram_top, phb0_alignment); 4836357d1e3bSDavid Gibson 4837357d1e3bSDavid Gibson if (index > max_index) { 4838357d1e3bSDavid Gibson error_setg(errp, "\"index\" for PAPR PHB is too large (max %u)", 4839357d1e3bSDavid Gibson max_index); 4840f5598c92SGreg Kurz return false; 4841357d1e3bSDavid Gibson } 4842357d1e3bSDavid Gibson 4843357d1e3bSDavid Gibson *buid = base_buid + index; 4844357d1e3bSDavid Gibson for (i = 0; i < n_dma; ++i) { 4845357d1e3bSDavid Gibson liobns[i] = SPAPR_PCI_LIOBN(index, i); 4846357d1e3bSDavid Gibson } 4847357d1e3bSDavid Gibson 4848357d1e3bSDavid Gibson phb_base = phb0_base + index * phb_spacing; 4849357d1e3bSDavid Gibson *pio = phb_base + pio_offset; 4850357d1e3bSDavid Gibson *mmio32 = phb_base + mmio_offset; 4851357d1e3bSDavid Gibson /* 4852357d1e3bSDavid Gibson * We don't set the 64-bit MMIO window, relying on the PHB's 4853357d1e3bSDavid Gibson * fallback behaviour of automatically splitting a large "32-bit" 4854357d1e3bSDavid Gibson * window into contiguous 32-bit and 64-bit windows 4855357d1e3bSDavid Gibson */ 4856ec132efaSAlexey Kardashevskiy 4857ec132efaSAlexey Kardashevskiy *nv2gpa = 0; 4858ec132efaSAlexey Kardashevskiy *nv2atsd = 0; 4859f5598c92SGreg Kurz return true; 4860357d1e3bSDavid Gibson } 4861db800b21SDavid Gibson 48621ea1eefcSBharata B Rao static void spapr_machine_2_7_class_options(MachineClass *mc) 48631ea1eefcSBharata B Rao { 4864ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 486588cbe073SMarc-André Lureau static GlobalProperty compat[] = { 48666c36bddfSEduardo Habkost { TYPE_SPAPR_PCI_HOST_BRIDGE, "mem_win_size", "0xf80000000", }, 48676c36bddfSEduardo Habkost { TYPE_SPAPR_PCI_HOST_BRIDGE, "mem64_win_size", "0", }, 48686c36bddfSEduardo Habkost { TYPE_POWERPC_CPU, "pre-2.8-migration", "on", }, 48696c36bddfSEduardo Habkost { TYPE_SPAPR_PCI_HOST_BRIDGE, "pre-2.8-migration", "on", }, 487088cbe073SMarc-André Lureau }; 48713daa4a9fSThomas Huth 4872db800b21SDavid Gibson spapr_machine_2_8_class_options(mc); 48732e9c10ebSIgor Mammedov mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power7_v2.3"); 4874a140c199SEduardo Habkost mc->default_machine_opts = "modern-hotplug-events=off"; 48755a995064SMarc-André Lureau compat_props_add(mc->compat_props, hw_compat_2_7, hw_compat_2_7_len); 487688cbe073SMarc-André Lureau compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); 4877357d1e3bSDavid Gibson smc->phb_placement = phb_placement_2_7; 48781ea1eefcSBharata B Rao } 48791ea1eefcSBharata B Rao 4880db800b21SDavid Gibson DEFINE_SPAPR_MACHINE(2_7, "2.7", false); 48811ea1eefcSBharata B Rao 48821ea1eefcSBharata B Rao /* 48834b23699cSDavid Gibson * pseries-2.6 48844b23699cSDavid Gibson */ 488588cbe073SMarc-André Lureau 488688cbe073SMarc-André Lureau static void spapr_machine_2_6_class_options(MachineClass *mc) 488788cbe073SMarc-André Lureau { 488888cbe073SMarc-André Lureau static GlobalProperty compat[] = { 48896c36bddfSEduardo Habkost { TYPE_SPAPR_PCI_HOST_BRIDGE, "ddw", "off" }, 4890fa386d98SMarc-André Lureau }; 48911ea1eefcSBharata B Rao 48921ea1eefcSBharata B Rao spapr_machine_2_7_class_options(mc); 4893c5514d0eSIgor Mammedov mc->has_hotpluggable_cpus = false; 4894ff8f261fSMarc-André Lureau compat_props_add(mc->compat_props, hw_compat_2_6, hw_compat_2_6_len); 489588cbe073SMarc-André Lureau compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); 48964b23699cSDavid Gibson } 48974b23699cSDavid Gibson 48981ea1eefcSBharata B Rao DEFINE_SPAPR_MACHINE(2_6, "2.6", false); 48994b23699cSDavid Gibson 49004b23699cSDavid Gibson /* 49011c5f29bbSDavid Gibson * pseries-2.5 49021c5f29bbSDavid Gibson */ 490388cbe073SMarc-André Lureau 490488cbe073SMarc-André Lureau static void spapr_machine_2_5_class_options(MachineClass *mc) 490588cbe073SMarc-André Lureau { 4906ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 490788cbe073SMarc-André Lureau static GlobalProperty compat[] = { 49086c36bddfSEduardo Habkost { "spapr-vlan", "use-rx-buffer-pools", "off" }, 4909fa386d98SMarc-André Lureau }; 49104b23699cSDavid Gibson 49114b23699cSDavid Gibson spapr_machine_2_6_class_options(mc); 491257040d45SThomas Huth smc->use_ohci_by_default = true; 4913fe759610SMarc-André Lureau compat_props_add(mc->compat_props, hw_compat_2_5, hw_compat_2_5_len); 491488cbe073SMarc-André Lureau compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); 49151c5f29bbSDavid Gibson } 49161c5f29bbSDavid Gibson 49174b23699cSDavid Gibson DEFINE_SPAPR_MACHINE(2_5, "2.5", false); 49181c5f29bbSDavid Gibson 49191c5f29bbSDavid Gibson /* 49201c5f29bbSDavid Gibson * pseries-2.4 49211c5f29bbSDavid Gibson */ 492280fd50f9SCornelia Huck 49235013c547SDavid Gibson static void spapr_machine_2_4_class_options(MachineClass *mc) 49245013c547SDavid Gibson { 4925ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 4926fc9f38c3SDavid Gibson 4927fc9f38c3SDavid Gibson spapr_machine_2_5_class_options(mc); 4928fc9f38c3SDavid Gibson smc->dr_lmb_enabled = false; 49292f99b9c2SMarc-André Lureau compat_props_add(mc->compat_props, hw_compat_2_4, hw_compat_2_4_len); 49301c5f29bbSDavid Gibson } 49311c5f29bbSDavid Gibson 4932fccbc785SDavid Gibson DEFINE_SPAPR_MACHINE(2_4, "2.4", false); 49331c5f29bbSDavid Gibson 49341c5f29bbSDavid Gibson /* 49351c5f29bbSDavid Gibson * pseries-2.3 49361c5f29bbSDavid Gibson */ 493788cbe073SMarc-André Lureau 493888cbe073SMarc-André Lureau static void spapr_machine_2_3_class_options(MachineClass *mc) 493988cbe073SMarc-André Lureau { 494088cbe073SMarc-André Lureau static GlobalProperty compat[] = { 49416c36bddfSEduardo Habkost { "spapr-pci-host-bridge", "dynamic-reconfiguration", "off" }, 4942fa386d98SMarc-André Lureau }; 4943fc9f38c3SDavid Gibson spapr_machine_2_4_class_options(mc); 49448995dd90SMarc-André Lureau compat_props_add(mc->compat_props, hw_compat_2_3, hw_compat_2_3_len); 494588cbe073SMarc-André Lureau compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); 49461c5f29bbSDavid Gibson } 4947fccbc785SDavid Gibson DEFINE_SPAPR_MACHINE(2_3, "2.3", false); 49481c5f29bbSDavid Gibson 49491c5f29bbSDavid Gibson /* 49501c5f29bbSDavid Gibson * pseries-2.2 49511c5f29bbSDavid Gibson */ 495288cbe073SMarc-André Lureau 495388cbe073SMarc-André Lureau static void spapr_machine_2_2_class_options(MachineClass *mc) 495488cbe073SMarc-André Lureau { 495588cbe073SMarc-André Lureau static GlobalProperty compat[] = { 49566c36bddfSEduardo Habkost { TYPE_SPAPR_PCI_HOST_BRIDGE, "mem_win_size", "0x20000000" }, 4957fa386d98SMarc-André Lureau }; 4958b194df47SAlexey Kardashevskiy 4959fc9f38c3SDavid Gibson spapr_machine_2_3_class_options(mc); 49601c30044eSMarc-André Lureau compat_props_add(mc->compat_props, hw_compat_2_2, hw_compat_2_2_len); 496188cbe073SMarc-André Lureau compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); 4962f6d0656bSEduardo Habkost mc->default_machine_opts = "modern-hotplug-events=off,suppress-vmdesc=on"; 49631c5f29bbSDavid Gibson } 4964fccbc785SDavid Gibson DEFINE_SPAPR_MACHINE(2_2, "2.2", false); 49651c5f29bbSDavid Gibson 49661c5f29bbSDavid Gibson /* 49671c5f29bbSDavid Gibson * pseries-2.1 49681c5f29bbSDavid Gibson */ 49691c5f29bbSDavid Gibson 49705013c547SDavid Gibson static void spapr_machine_2_1_class_options(MachineClass *mc) 4971b0e966d0SJason Wang { 4972fc9f38c3SDavid Gibson spapr_machine_2_2_class_options(mc); 4973c4fc5695SMarc-André Lureau compat_props_add(mc->compat_props, hw_compat_2_1, hw_compat_2_1_len); 49746026db45SAlexey Kardashevskiy } 4975fccbc785SDavid Gibson DEFINE_SPAPR_MACHINE(2_1, "2.1", false); 49766026db45SAlexey Kardashevskiy 497729ee3247SAlexey Kardashevskiy static void spapr_machine_register_types(void) 497829ee3247SAlexey Kardashevskiy { 497929ee3247SAlexey Kardashevskiy type_register_static(&spapr_machine_info); 498029ee3247SAlexey Kardashevskiy } 498129ee3247SAlexey Kardashevskiy 498229ee3247SAlexey Kardashevskiy type_init(spapr_machine_register_types) 4983