/*
 * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
 *
 * Copyright (c) 2004-2007 Fabrice Bellard
 * Copyright (c) 2007 Jocelyn Mayer
 * Copyright (c) 2010 David Gibson, IBM Corporation.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
2553018216SPaolo Bonzini */ 26a8d25326SMarkus Armbruster 270d75590dSPeter Maydell #include "qemu/osdep.h" 28a8d25326SMarkus Armbruster #include "qemu-common.h" 29da34e65cSMarkus Armbruster #include "qapi/error.h" 30fa98fbfcSSam Bobroff #include "qapi/visitor.h" 3153018216SPaolo Bonzini #include "sysemu/sysemu.h" 32b58c5c2dSMarkus Armbruster #include "sysemu/hostmem.h" 33e35704baSEduardo Habkost #include "sysemu/numa.h" 3423ff81bdSGreg Kurz #include "sysemu/qtest.h" 3571e8a915SMarkus Armbruster #include "sysemu/reset.h" 3654d31236SMarkus Armbruster #include "sysemu/runstate.h" 3703dd024fSPaolo Bonzini #include "qemu/log.h" 3871461b0fSAlexey Kardashevskiy #include "hw/fw-path-provider.h" 3953018216SPaolo Bonzini #include "elf.h" 4053018216SPaolo Bonzini #include "net/net.h" 41ad440b4aSAndrew Jones #include "sysemu/device_tree.h" 4253018216SPaolo Bonzini #include "sysemu/cpus.h" 43b3946626SVincent Palatin #include "sysemu/hw_accel.h" 4453018216SPaolo Bonzini #include "kvm_ppc.h" 45c4b63b7cSJuan Quintela #include "migration/misc.h" 46ca77ee28SMarkus Armbruster #include "migration/qemu-file-types.h" 4784a899deSJuan Quintela #include "migration/global_state.h" 48f2a8f0a6SJuan Quintela #include "migration/register.h" 492500fb42SAravinda Prasad #include "migration/blocker.h" 504be21d56SDavid Gibson #include "mmu-hash64.h" 51b4db5413SSuraj Jitindar Singh #include "mmu-book3s-v3.h" 527abd43baSSuraj Jitindar Singh #include "cpu-models.h" 532e5b09fdSMarkus Armbruster #include "hw/core/cpu.h" 5453018216SPaolo Bonzini 5553018216SPaolo Bonzini #include "hw/boards.h" 560d09e41aSPaolo Bonzini #include "hw/ppc/ppc.h" 5753018216SPaolo Bonzini #include "hw/loader.h" 5853018216SPaolo Bonzini 597804c353SCédric Le Goater #include "hw/ppc/fdt.h" 600d09e41aSPaolo Bonzini #include "hw/ppc/spapr.h" 610d09e41aSPaolo Bonzini #include "hw/ppc/spapr_vio.h" 62a27bd6c7SMarkus Armbruster #include "hw/qdev-properties.h" 630d09e41aSPaolo Bonzini #include "hw/pci-host/spapr.h" 6453018216SPaolo Bonzini 
#include "hw/pci/msi.h" 6553018216SPaolo Bonzini 6653018216SPaolo Bonzini #include "hw/pci/pci.h" 6771461b0fSAlexey Kardashevskiy #include "hw/scsi/scsi.h" 6871461b0fSAlexey Kardashevskiy #include "hw/virtio/virtio-scsi.h" 69c4e13492SFelipe Franciosi #include "hw/virtio/vhost-scsi-common.h" 7053018216SPaolo Bonzini 7153018216SPaolo Bonzini #include "exec/address-spaces.h" 722309832aSDavid Gibson #include "exec/ram_addr.h" 7353018216SPaolo Bonzini #include "hw/usb.h" 7453018216SPaolo Bonzini #include "qemu/config-file.h" 75135a129aSAneesh Kumar K.V #include "qemu/error-report.h" 762a6593cbSAlexey Kardashevskiy #include "trace.h" 7734316482SAlexey Kardashevskiy #include "hw/nmi.h" 786449da45SCédric Le Goater #include "hw/intc/intc.h" 7953018216SPaolo Bonzini 8094a94e4cSBharata B Rao #include "hw/ppc/spapr_cpu_core.h" 812cc0e2e8SDavid Hildenbrand #include "hw/mem/memory-device.h" 820fb6bd07SMichael Roth #include "hw/ppc/spapr_tpm_proxy.h" 83ee3a71e3SShivaprasad G Bhat #include "hw/ppc/spapr_nvdimm.h" 841eee9950SDaniel Henrique Barboza #include "hw/ppc/spapr_numa.h" 8568a27b20SMichael S. 
Tsirkin 86f041d6afSGreg Kurz #include "monitor/monitor.h" 87f041d6afSGreg Kurz 8853018216SPaolo Bonzini #include <libfdt.h> 8953018216SPaolo Bonzini 9053018216SPaolo Bonzini /* SLOF memory layout: 9153018216SPaolo Bonzini * 9253018216SPaolo Bonzini * SLOF raw image loaded at 0, copies its romfs right below the flat 9353018216SPaolo Bonzini * device-tree, then position SLOF itself 31M below that 9453018216SPaolo Bonzini * 9553018216SPaolo Bonzini * So we set FW_OVERHEAD to 40MB which should account for all of that 9653018216SPaolo Bonzini * and more 9753018216SPaolo Bonzini * 9853018216SPaolo Bonzini * We load our kernel at 4M, leaving space for SLOF initial image 9953018216SPaolo Bonzini */ 100b7d1f77aSBenjamin Herrenschmidt #define RTAS_MAX_ADDR 0x80000000 /* RTAS must stay below that */ 10153018216SPaolo Bonzini #define FW_MAX_SIZE 0x400000 10253018216SPaolo Bonzini #define FW_FILE_NAME "slof.bin" 10353018216SPaolo Bonzini #define FW_OVERHEAD 0x2800000 10453018216SPaolo Bonzini #define KERNEL_LOAD_ADDR FW_MAX_SIZE 10553018216SPaolo Bonzini 1069943266eSDavid Gibson #define MIN_RMA_SLOF (128 * MiB) 10753018216SPaolo Bonzini 1085c7adcf4SGreg Kurz #define PHANDLE_INTC 0x00001111 10953018216SPaolo Bonzini 1105d0fb150SGreg Kurz /* These two functions implement the VCPU id numbering: one to compute them 1115d0fb150SGreg Kurz * all and one to identify thread 0 of a VCORE. Any change to the first one 1125d0fb150SGreg Kurz * is likely to have an impact on the second one, so let's keep them close. 
 */
/*
 * Map a global cpu_index to the VCPU (interrupt server) id presented to the
 * guest: threads of a core stay contiguous, cores are spaced spapr->vsmt
 * ids apart.
 */
static int spapr_vcpu_id(SpaprMachineState *spapr, int cpu_index)
{
    MachineState *ms = MACHINE(spapr);
    unsigned int smp_threads = ms->smp.threads;

    assert(spapr->vsmt);
    return
        (cpu_index / smp_threads) * spapr->vsmt + cpu_index % smp_threads;
}
/* True iff @cpu is thread 0 of its virtual core (VCPU id multiple of vsmt) */
static bool spapr_is_thread0_in_vcore(SpaprMachineState *spapr,
                                      PowerPCCPU *cpu)
{
    assert(spapr->vsmt);
    return spapr_get_vcpu_id(cpu) % spapr->vsmt == 0;
}

static bool pre_2_10_vmstate_dummy_icp_needed(void *opaque)
{
    /* Dummy entries correspond to unused ICPState objects in older QEMUs,
     * and newer QEMUs don't even have them. In both cases, we don't want
     * to send anything on the wire.
     */
    return false;
}

/*
 * Placeholder vmstate for ICP slots that older QEMUs expected on the wire;
 * .needed always returns false so nothing is actually transferred.
 */
static const VMStateDescription pre_2_10_vmstate_dummy_icp = {
    .name = "icp/server",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pre_2_10_vmstate_dummy_icp_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UNUSED(4), /* uint32_t xirr */
        VMSTATE_UNUSED(1), /* uint8_t pending_priority */
        VMSTATE_UNUSED(1), /* uint8_t mfrr */
        VMSTATE_END_OF_LIST()
    },
};

static void pre_2_10_vmstate_register_dummy_icp(int i)
{
    /* The slot index doubles as the instance id and opaque cookie */
    vmstate_register(NULL, i, &pre_2_10_vmstate_dummy_icp,
                     (void *)(uintptr_t) i);
}

static void pre_2_10_vmstate_unregister_dummy_icp(int i)
{
    vmstate_unregister(NULL, &pre_2_10_vmstate_dummy_icp,
                       (void *)(uintptr_t) i);
}

/*
 * Number of VCPU ids needed to cover max_cpus, given the vsmt spacing
 * applied by spapr_vcpu_id() above.
 */
int spapr_max_server_number(SpaprMachineState *spapr)
{
    MachineState *ms = MACHINE(spapr);

    assert(spapr->vsmt);
    return DIV_ROUND_UP(ms->smp.max_cpus * spapr->vsmt, ms->smp.threads);
}

/*
 * Set the per-thread interrupt server properties (and optional cpu-version)
 * on an already-created CPU device-tree node at @offset.
 * Returns 0 on success, a negative libfdt error code otherwise.
 */
static int spapr_fixup_cpu_smt_dt(void *fdt, int offset, PowerPCCPU *cpu,
                                  int smt_threads)
{
    int i, ret = 0;
    uint32_t servers_prop[smt_threads];
    uint32_t gservers_prop[smt_threads * 2];
    int index = spapr_get_vcpu_id(cpu);

    if (cpu->compat_pvr) {
        ret = fdt_setprop_cell(fdt, offset, "cpu-version", cpu->compat_pvr);
        if (ret < 0) {
            return ret;
        }
    }

    /* Build interrupt servers and gservers properties */
    for (i = 0; i < smt_threads; i++) {
        servers_prop[i] = cpu_to_be32(index + i);
        /* Hack, direct the group queues back to cpu 0 */
        gservers_prop[i*2] = cpu_to_be32(index + i);
        gservers_prop[i*2 + 1] = 0;
    }
    ret = fdt_setprop(fdt, offset, "ibm,ppc-interrupt-server#s",
                      servers_prop, sizeof(servers_prop));
    if (ret < 0) {
        return ret;
    }
    ret = fdt_setprop(fdt, offset, "ibm,ppc-interrupt-gserver#s",
                      gservers_prop, sizeof(gservers_prop));

    return ret;
}

/*
 * Emit the "ibm,pa-features" property for @cpu's node: pick the blob
 * matching the highest compat level the CPU reports (2.06 / 2.07 / 3.00),
 * then patch individual capability bits before writing it out.
 * Silently emits nothing if no compat level matches.
 */
static void spapr_dt_pa_features(SpaprMachineState *spapr,
                                 PowerPCCPU *cpu,
                                 void *fdt, int offset)
{
    /* Each blob starts with { length, 0 } followed by <length> byte-pair
     * encoded attribute bytes, as defined by PAPR for ibm,pa-features. */
    uint8_t pa_features_206[] = { 6, 0,
        0xf6, 0x1f, 0xc7, 0x00, 0x80, 0xc0 };
    uint8_t pa_features_207[] = { 24, 0,
        0xf6, 0x1f, 0xc7, 0xc0, 0x80, 0xf0,
        0x80, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x80, 0x00,
        0x80, 0x00, 0x80, 0x00, 0x00, 0x00 };
    uint8_t pa_features_300[] = { 66, 0,
        /* 0: MMU|FPU|SLB|RUN|DABR|NX, 1: fri[nzpm]|DABRX|SPRG3|SLB0|PP110 */
        /* 2: VPM|DS205|PPR|DS202|DS206, 3: LSD|URG, SSO, 5: LE|CFAR|EB|LSQ */
        0xf6, 0x1f, 0xc7, 0xc0, 0x80, 0xf0, /* 0 - 5 */
        /* 6: DS207 */
        0x80, 0x00, 0x00, 0x00, 0x00, 0x00, /* 6 - 11 */
        /* 16: Vector */
        0x00, 0x00, 0x00, 0x00, 0x80, 0x00, /* 12 - 17 */
        /* 18: Vec. Scalar, 20: Vec. XOR, 22: HTM */
        0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 18 - 23 */
        /* 24: Ext. Dec, 26: 64 bit ftrs, 28: PM ftrs */
        0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 24 - 29 */
        /* 30: MMR, 32: LE atomic, 34: EBB + ext EBB */
        0x80, 0x00, 0x80, 0x00, 0xC0, 0x00, /* 30 - 35 */
        /* 36: SPR SO, 38: Copy/Paste, 40: Radix MMU */
        0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 36 - 41 */
        /* 42: PM, 44: PC RA, 46: SC vec'd */
        0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 42 - 47 */
        /* 48: SIMD, 50: QP BFP, 52: String */
        0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 48 - 53 */
        /* 54: DecFP, 56: DecI, 58: SHA */
        0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 54 - 59 */
        /* 60: NM atomic, 62: RNG */
        0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 60 - 65 */
    };
    uint8_t *pa_features = NULL;
    size_t pa_size;

    /* Checks are cumulative: the last (highest) matching compat level wins */
    if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_2_06, 0, cpu->compat_pvr)) {
        pa_features = pa_features_206;
        pa_size = sizeof(pa_features_206);
    }
    if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_2_07, 0, cpu->compat_pvr)) {
        pa_features = pa_features_207;
        pa_size = sizeof(pa_features_207);
    }
    if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_00, 0, cpu->compat_pvr)) {
        pa_features = pa_features_300;
        pa_size = sizeof(pa_features_300);
    }
    if (!pa_features) {
        return;
    }

    if (ppc_hash64_has(cpu, PPC_HASH64_CI_LARGEPAGE)) {
        /*
         * Note: we keep CI large pages off by default because a 64K capable
         * guest provisioned with large pages might otherwise try to map a qemu
         * framebuffer (or other kind of memory mapped PCI BAR) using 64K pages
         * even if that qemu runs on a 4k host.
         * We add this bit back here if we are confident this is not an issue
         */
        pa_features[3] |= 0x20;
    }
    if ((spapr_get_cap(spapr, SPAPR_CAP_HTM) != 0) && pa_size > 24) {
        pa_features[24] |= 0x80; /* Transactional memory support */
    }
    if (spapr->cas_pre_isa3_guest && pa_size > 40) {
        /* Workaround for broken kernels that attempt (guest) radix
         * mode when they can't handle it, if they see the radix bit set
         * in pa-features. So hide it from them. */
        pa_features[40 + 2] &= ~0x80; /* Radix MMU */
    }

    _FDT((fdt_setprop(fdt, offset, "ibm,pa-features", pa_features, pa_size)));
}

/*
 * Memory size of the first NUMA node that has any, rounded down to a power
 * of two and clamped to ram_size; falls back to the whole ram_size when no
 * NUMA node has memory.
 */
static hwaddr spapr_node0_size(MachineState *machine)
{
    if (machine->numa_state->num_nodes) {
        int i;
        for (i = 0; i < machine->numa_state->num_nodes; ++i) {
            if (machine->numa_state->nodes[i].node_mem) {
                return MIN(pow2floor(machine->numa_state->nodes[i].node_mem),
                           machine->ram_size);
            }
        }
    }
    return machine->ram_size;
}

/* Append @s1 to @s INCLUDING its terminating NUL (note the strlen + 1) */
static void add_str(GString *s, const gchar *s1)
{
    g_string_append_len(s, s1, strlen(s1) + 1);
}

/*
 * Create a memory@<start> node covering [start, start + size) and attach
 * the NUMA associativity for @nodeid. Returns the new node's fdt offset.
 */
static int spapr_dt_memory_node(SpaprMachineState *spapr, void *fdt, int nodeid,
                                hwaddr start, hwaddr size)
{
    char mem_name[32];
    uint64_t mem_reg_property[2];
    int off;

    mem_reg_property[0] = cpu_to_be64(start);
    mem_reg_property[1] = cpu_to_be64(size);

    sprintf(mem_name, "memory@%" HWADDR_PRIx, start);
    off = fdt_add_subnode(fdt, 0, mem_name);
    _FDT(off);
    _FDT((fdt_setprop_string(fdt, off, "device_type", "memory")));
    _FDT((fdt_setprop(fdt, off, "reg", mem_reg_property,
                      sizeof(mem_reg_property))));
    spapr_numa_write_associativity_dt(spapr, fdt, off, nodeid);
    return off;
}

/*
 * NUMA node of the plugged DIMM whose address range contains @addr,
 * or (uint32_t)-1 when no DIMM in @list covers it.
 */
static uint32_t spapr_pc_dimm_node(MemoryDeviceInfoList *list, ram_addr_t addr)
{
    MemoryDeviceInfoList *info;

    for (info = list; info; info = info->next) {
        MemoryDeviceInfo *value = info->value;

        if (value && value->type == MEMORY_DEVICE_INFO_KIND_DIMM) {
            PCDIMMDeviceInfo *pcdimm_info = value->u.dimm.data;

            if (addr >= pcdimm_info->addr &&
                addr < (pcdimm_info->addr + pcdimm_info->size)) {
                return pcdimm_info->node;
            }
        }
    }

    return -1;
}

/* On-the-wire layout of one ibm,dynamic-memory-v2 LMB-set entry (big-endian,
 * packed -- this struct is memcpy'd straight into the property buffer). */
struct sPAPRDrconfCellV2 {
    uint32_t seq_lmbs;
    uint64_t base_addr;
    uint32_t drc_index;
    uint32_t aa_index;
    uint32_t flags;
} QEMU_PACKED;

/* Queue element used to accumulate v2 cells before serialization */
typedef struct DrconfCellQueue {
    struct sPAPRDrconfCellV2 cell;
    QSIMPLEQ_ENTRY(DrconfCellQueue) entry;
} DrconfCellQueue;

/* Allocate one queue element with all cell fields converted to big-endian.
 * Caller owns (and frees) the returned element. */
static DrconfCellQueue *
spapr_get_drconf_cell(uint32_t seq_lmbs, uint64_t base_addr,
                      uint32_t drc_index, uint32_t aa_index,
                      uint32_t flags)
{
    DrconfCellQueue *elem;

    elem = g_malloc0(sizeof(*elem));
    elem->cell.seq_lmbs = cpu_to_be32(seq_lmbs);
    elem->cell.base_addr = cpu_to_be64(base_addr);
    elem->cell.drc_index = cpu_to_be32(drc_index);
    elem->cell.aa_index = cpu_to_be32(aa_index);
    elem->cell.flags = cpu_to_be32(flags);

    return elem;
}

/*
 * Build the "ibm,dynamic-memory-v2" property under @offset: a count
 * followed by sPAPRDrconfCellV2 entries describing boot RAM (reserved),
 * each plugged DIMM, and the hot-pluggable gaps between/after them.
 * Returns 0 on success, -1 if fdt_setprop() fails.
 */
static int spapr_dt_dynamic_memory_v2(SpaprMachineState *spapr, void *fdt,
                                      int offset, MemoryDeviceInfoList *dimms)
{
    MachineState *machine = MACHINE(spapr);
    uint8_t *int_buf, *cur_index;
    int ret;
    uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE;
    uint64_t addr, cur_addr, size;
    uint32_t nr_boot_lmbs = (machine->device_memory->base / lmb_size);
    uint64_t mem_end = machine->device_memory->base +
                       memory_region_size(&machine->device_memory->mr);
    uint32_t node, buf_len, nr_entries = 0;
    SpaprDrc *drc;
    DrconfCellQueue *elem, *next;
    MemoryDeviceInfoList *info;
    QSIMPLEQ_HEAD(, DrconfCellQueue) drconf_queue
        = QSIMPLEQ_HEAD_INITIALIZER(drconf_queue);

    /* Entry to cover RAM and the gap area */
    elem = spapr_get_drconf_cell(nr_boot_lmbs, 0, 0, -1,
                                 SPAPR_LMB_FLAGS_RESERVED |
                                 SPAPR_LMB_FLAGS_DRC_INVALID);
    QSIMPLEQ_INSERT_TAIL(&drconf_queue, elem, entry);
    nr_entries++;

    cur_addr = machine->device_memory->base;
    /* NOTE: @dimms is assumed sorted by ascending address -- the cur_addr
     * walk below relies on it (qmp_memory_device_list() provides that). */
    for (info = dimms; info; info = info->next) {
        PCDIMMDeviceInfo *di = info->value->u.dimm.data;

        addr = di->addr;
        size = di->size;
        node = di->node;

        /*
         * The NVDIMM area is hotpluggable after the NVDIMM is unplugged. The
         * area is marked hotpluggable in the next iteration for the bigger
         * chunk including the NVDIMM occupied area.
         */
        if (info->value->type == MEMORY_DEVICE_INFO_KIND_NVDIMM)
            continue;

        /* Entry for hot-pluggable area */
        if (cur_addr < addr) {
            drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, cur_addr / lmb_size);
            g_assert(drc);
            elem = spapr_get_drconf_cell((addr - cur_addr) / lmb_size,
                                         cur_addr, spapr_drc_index(drc), -1, 0);
            QSIMPLEQ_INSERT_TAIL(&drconf_queue, elem, entry);
            nr_entries++;
        }

        /* Entry for DIMM */
        drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, addr / lmb_size);
        g_assert(drc);
        elem = spapr_get_drconf_cell(size / lmb_size, addr,
                                     spapr_drc_index(drc), node,
                                     (SPAPR_LMB_FLAGS_ASSIGNED |
                                      SPAPR_LMB_FLAGS_HOTREMOVABLE));
        QSIMPLEQ_INSERT_TAIL(&drconf_queue, elem, entry);
        nr_entries++;
        cur_addr = addr + size;
    }

    /* Entry for remaining hotpluggable area */
    if (cur_addr < mem_end) {
        drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, cur_addr / lmb_size);
        g_assert(drc);
        elem = spapr_get_drconf_cell((mem_end - cur_addr) / lmb_size,
                                     cur_addr, spapr_drc_index(drc), -1, 0);
        QSIMPLEQ_INSERT_TAIL(&drconf_queue, elem, entry);
        nr_entries++;
    }

    /* Serialize: leading be32 entry count, then the packed cells */
    buf_len = nr_entries * sizeof(struct sPAPRDrconfCellV2) + sizeof(uint32_t);
    int_buf = cur_index = g_malloc0(buf_len);
    *(uint32_t *)int_buf = cpu_to_be32(nr_entries);
    cur_index += sizeof(nr_entries);

    QSIMPLEQ_FOREACH_SAFE(elem, &drconf_queue, entry, next) {
        memcpy(cur_index, &elem->cell, sizeof(elem->cell));
        cur_index += sizeof(elem->cell);
        QSIMPLEQ_REMOVE(&drconf_queue, elem, DrconfCellQueue, entry);
        g_free(elem);
    }

    ret = fdt_setprop(fdt, offset, "ibm,dynamic-memory-v2", int_buf, buf_len);
    g_free(int_buf);
    if (ret < 0) {
        return -1;
    }
    return 0;
}

/*
 * Build the legacy "ibm,dynamic-memory" property under @offset: a count
 * followed by one fixed-size (SPAPR_DR_LMB_LIST_ENTRY_SIZE words) record
 * per LMB, covering boot RAM (reserved, no DRC) and device memory.
 * Returns 0 on success, -1 if fdt_setprop() fails.
 */
static int spapr_dt_dynamic_memory(SpaprMachineState *spapr, void *fdt,
                                   int offset, MemoryDeviceInfoList *dimms)
{
    MachineState *machine = MACHINE(spapr);
    int i, ret;
    uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE;
    uint32_t device_lmb_start = machine->device_memory->base / lmb_size;
    uint32_t nr_lmbs = (machine->device_memory->base +
                       memory_region_size(&machine->device_memory->mr)) /
                       lmb_size;
    uint32_t *int_buf, *cur_index, buf_len;

    /*
     * Allocate enough buffer size to fit in ibm,dynamic-memory
     */
    buf_len = (nr_lmbs * SPAPR_DR_LMB_LIST_ENTRY_SIZE + 1) * sizeof(uint32_t);
    cur_index = int_buf = g_malloc0(buf_len);
    int_buf[0] = cpu_to_be32(nr_lmbs);
    cur_index++;
    for (i = 0; i < nr_lmbs; i++) {
        uint64_t addr = i * lmb_size;
        uint32_t *dynamic_memory = cur_index;

        if (i >= device_lmb_start) {
            SpaprDrc *drc;

            drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, i);
            g_assert(drc);

            /* Record layout: addr-hi, addr-lo, drc-index, reserved,
             * associativity-index, flags */
            dynamic_memory[0] = cpu_to_be32(addr >> 32);
            dynamic_memory[1] = cpu_to_be32(addr & 0xffffffff);
            dynamic_memory[2] = cpu_to_be32(spapr_drc_index(drc));
            dynamic_memory[3] = cpu_to_be32(0); /* reserved */
            dynamic_memory[4] = cpu_to_be32(spapr_pc_dimm_node(dimms, addr));
            if (memory_region_present(get_system_memory(), addr)) {
                dynamic_memory[5] = cpu_to_be32(SPAPR_LMB_FLAGS_ASSIGNED);
            } else {
                dynamic_memory[5] = cpu_to_be32(0);
            }
        } else {
            /*
             * LMB information for RMA, boot time RAM and gap b/n RAM and
             * device memory region -- all these are marked as reserved
             * and as having no valid DRC.
             */
            dynamic_memory[0] = cpu_to_be32(addr >> 32);
            dynamic_memory[1] = cpu_to_be32(addr & 0xffffffff);
            dynamic_memory[2] = cpu_to_be32(0);
            dynamic_memory[3] = cpu_to_be32(0); /* reserved */
            dynamic_memory[4] = cpu_to_be32(-1);
            dynamic_memory[5] = cpu_to_be32(SPAPR_LMB_FLAGS_RESERVED |
                                            SPAPR_LMB_FLAGS_DRC_INVALID);
        }

        cur_index += SPAPR_DR_LMB_LIST_ENTRY_SIZE;
    }
    ret = fdt_setprop(fdt, offset, "ibm,dynamic-memory", int_buf, buf_len);
    g_free(int_buf);
    if (ret < 0) {
        return -1;
    }
    return 0;
}

/*
 * Adds ibm,dynamic-reconfiguration-memory node.
 * Refer to docs/specs/ppc-spapr-hotplug.txt for the documentation
 * of this device tree node.
 */
static int spapr_dt_dynamic_reconfiguration_memory(SpaprMachineState *spapr,
                                                   void *fdt)
{
    MachineState *machine = MACHINE(spapr);
    int ret, offset;
    uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE;
    uint32_t prop_lmb_size[] = {cpu_to_be32(lmb_size >> 32),
                                cpu_to_be32(lmb_size & 0xffffffff)};
    MemoryDeviceInfoList *dimms = NULL;

    /*
     * Don't create the node if there is no device memory
     */
    if (machine->ram_size == machine->maxram_size) {
        return 0;
    }

    offset = fdt_add_subnode(fdt, 0, "ibm,dynamic-reconfiguration-memory");

    ret = fdt_setprop(fdt, offset, "ibm,lmb-size", prop_lmb_size,
                      sizeof(prop_lmb_size));
    if (ret < 0) {
        return ret;
    }

    ret = fdt_setprop_cell(fdt, offset, "ibm,memory-flags-mask", 0xff);
    if (ret < 0) {
        return ret;
    }

    ret = fdt_setprop_cell(fdt, offset, "ibm,memory-preservation-time", 0x0);
    if (ret < 0) {
        return ret;
    }

    /* ibm,dynamic-memory or ibm,dynamic-memory-v2 */
    dimms = qmp_memory_device_list();
    if (spapr_ovec_test(spapr->ov5_cas, OV5_DRMEM_V2)) {
        /* Guest negotiated the v2 format during CAS */
        ret = spapr_dt_dynamic_memory_v2(spapr, fdt, offset, dimms);
    } else {
        ret = spapr_dt_dynamic_memory(spapr, fdt, offset, dimms);
    }
    qapi_free_MemoryDeviceInfoList(dimms);

    if (ret < 0) {
        return ret;
    }

    /* ibm,associativity-lookup-arrays for the LMB aa_index values */
    ret = spapr_numa_write_assoc_lookup_arrays(spapr, fdt, offset);

    return ret;
}

/*
 * Populate all memory@ nodes (split per NUMA node, in power-of-two sized
 * chunks aligned on mem_start) and, when the guest negotiated
 * OV5_DRCONF_MEMORY, the ibm,dynamic-reconfiguration-memory node.
 * Returns 0 on success or the error from the drconf builder.
 */
static int spapr_dt_memory(SpaprMachineState *spapr, void *fdt)
{
    MachineState *machine = MACHINE(spapr);
    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
    hwaddr mem_start, node_size;
    int i, nb_nodes = machine->numa_state->num_nodes;
    NodeInfo *nodes = machine->numa_state->nodes;

    for (i = 0, mem_start = 0; i < nb_nodes; ++i) {
        if (!nodes[i].node_mem) {
            continue;
        }
        if (mem_start >= machine->ram_size) {
            /* Node lies entirely in the drconf region: emit nothing here */
            node_size = 0;
        } else {
            node_size = nodes[i].node_mem;
            if (node_size > machine->ram_size - mem_start) {
                node_size = machine->ram_size - mem_start;
            }
        }
        if (!mem_start) {
            /* spapr_machine_init() checks for rma_size <= node0_size
             * already */
            spapr_dt_memory_node(spapr, fdt, i, 0, spapr->rma_size);
            mem_start += spapr->rma_size;
            node_size -= spapr->rma_size;
        }
        for ( ; node_size; ) {
            hwaddr sizetmp = pow2floor(node_size);

            /* mem_start != 0 here */
            if (ctzl(mem_start) < ctzl(sizetmp)) {
                sizetmp = 1ULL << ctzl(mem_start);
            }

            spapr_dt_memory_node(spapr, fdt, i, mem_start, sizetmp);
            node_size -= sizetmp;
            mem_start += sizetmp;
        }
    }

    /* Generate ibm,dynamic-reconfiguration-memory node if required */
    if (spapr_ovec_test(spapr->ov5_cas, OV5_DRCONF_MEMORY)) {
        int ret;

        g_assert(smc->dr_lmb_enabled);
        ret = spapr_dt_dynamic_reconfiguration_memory(spapr, fdt);
        if (ret) {
            return ret;
        }
    }

    return 0;
}

/* Fill in one CPU's device-tree node at @offset (continues past this chunk) */
static void spapr_dt_cpu(CPUState *cs, void *fdt, int offset,
                         SpaprMachineState *spapr)
{
    MachineState *ms = MACHINE(spapr);
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs);
    int index = spapr_get_vcpu_id(cpu);
    uint32_t segs[] = {cpu_to_be32(28), cpu_to_be32(40),
                       0xffffffff, 0xffffffff};
    uint32_t tbfreq = kvm_enabled() ? kvmppc_get_tbfreq()
        : SPAPR_TIMEBASE_FREQ;
    uint32_t cpufreq = kvm_enabled() ? kvmppc_get_clockfreq() : 1000000000;
    uint32_t page_sizes_prop[64];
    size_t page_sizes_prop_size;
    unsigned int smp_threads = ms->smp.threads;
    uint32_t vcpus_per_socket = smp_threads * ms->smp.cores;
    uint32_t pft_size_prop[] = {0, cpu_to_be32(spapr->htab_shift)};
    int compat_smt = MIN(smp_threads, ppc_compat_max_vthreads(cpu));
    SpaprDrc *drc;
    int drc_index;
    uint32_t radix_AP_encodings[PPC_PAGE_SIZES_MAX_SZ];
    int i;

    drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU, index);
    if (drc) {
        drc_index = spapr_drc_index(drc);
        _FDT((fdt_setprop_cell(fdt, offset, "ibm,my-drc-index", drc_index)));
    }

    _FDT((fdt_setprop_cell(fdt, offset, "reg", index)));
    _FDT((fdt_setprop_string(fdt, offset, "device_type", "cpu")));

    _FDT((fdt_setprop_cell(fdt, offset, "cpu-version", env->spr[SPR_PVR])));
    _FDT((fdt_setprop_cell(fdt, offset, "d-cache-block-size",
                           env->dcache_line_size)));
    _FDT((fdt_setprop_cell(fdt, offset, "d-cache-line-size",
                           env->dcache_line_size)));
    _FDT((fdt_setprop_cell(fdt,
offset, "i-cache-block-size", 6812a6593cbSAlexey Kardashevskiy env->icache_line_size))); 6822a6593cbSAlexey Kardashevskiy _FDT((fdt_setprop_cell(fdt, offset, "i-cache-line-size", 6832a6593cbSAlexey Kardashevskiy env->icache_line_size))); 6842a6593cbSAlexey Kardashevskiy 6852a6593cbSAlexey Kardashevskiy if (pcc->l1_dcache_size) { 6862a6593cbSAlexey Kardashevskiy _FDT((fdt_setprop_cell(fdt, offset, "d-cache-size", 6872a6593cbSAlexey Kardashevskiy pcc->l1_dcache_size))); 6882a6593cbSAlexey Kardashevskiy } else { 6892a6593cbSAlexey Kardashevskiy warn_report("Unknown L1 dcache size for cpu"); 6902a6593cbSAlexey Kardashevskiy } 6912a6593cbSAlexey Kardashevskiy if (pcc->l1_icache_size) { 6922a6593cbSAlexey Kardashevskiy _FDT((fdt_setprop_cell(fdt, offset, "i-cache-size", 6932a6593cbSAlexey Kardashevskiy pcc->l1_icache_size))); 6942a6593cbSAlexey Kardashevskiy } else { 6952a6593cbSAlexey Kardashevskiy warn_report("Unknown L1 icache size for cpu"); 6962a6593cbSAlexey Kardashevskiy } 6972a6593cbSAlexey Kardashevskiy 6982a6593cbSAlexey Kardashevskiy _FDT((fdt_setprop_cell(fdt, offset, "timebase-frequency", tbfreq))); 6992a6593cbSAlexey Kardashevskiy _FDT((fdt_setprop_cell(fdt, offset, "clock-frequency", cpufreq))); 7002a6593cbSAlexey Kardashevskiy _FDT((fdt_setprop_cell(fdt, offset, "slb-size", cpu->hash64_opts->slb_size))); 7012a6593cbSAlexey Kardashevskiy _FDT((fdt_setprop_cell(fdt, offset, "ibm,slb-size", cpu->hash64_opts->slb_size))); 7022a6593cbSAlexey Kardashevskiy _FDT((fdt_setprop_string(fdt, offset, "status", "okay"))); 7032a6593cbSAlexey Kardashevskiy _FDT((fdt_setprop(fdt, offset, "64-bit", NULL, 0))); 7042a6593cbSAlexey Kardashevskiy 70553018216SPaolo Bonzini if (env->spr_cb[SPR_PURR].oea_read) { 70653018216SPaolo Bonzini _FDT((fdt_setprop_cell(fdt, offset, "ibm,purr", 1))); 70753018216SPaolo Bonzini } 70853018216SPaolo Bonzini if (env->spr_cb[SPR_SPURR].oea_read) { 70953018216SPaolo Bonzini _FDT((fdt_setprop_cell(fdt, offset, "ibm,spurr", 1))); 71053018216SPaolo 
Bonzini } 7115fe269b1SPaul Mackerras 71253018216SPaolo Bonzini if (ppc_hash64_has(cpu, PPC_HASH64_1TSEG)) { 71353018216SPaolo Bonzini _FDT((fdt_setprop(fdt, offset, "ibm,processor-segment-sizes", 71453018216SPaolo Bonzini segs, sizeof(segs)))); 71553018216SPaolo Bonzini } 7165fe269b1SPaul Mackerras 7175fe269b1SPaul Mackerras /* Advertise VSX (vector extensions) if available 7185fe269b1SPaul Mackerras * 1 == VMX / Altivec available 7195fe269b1SPaul Mackerras * 2 == VSX available 7205fe269b1SPaul Mackerras * 72153018216SPaolo Bonzini * Only CPUs for which we create core types in spapr_cpu_core.c 72253018216SPaolo Bonzini * are possible, and all of those have VMX */ 72353018216SPaolo Bonzini if (spapr_get_cap(spapr, SPAPR_CAP_VSX) != 0) { 72453018216SPaolo Bonzini _FDT((fdt_setprop_cell(fdt, offset, "ibm,vmx", 2))); 72553018216SPaolo Bonzini } else { 72653018216SPaolo Bonzini _FDT((fdt_setprop_cell(fdt, offset, "ibm,vmx", 1))); 72728e02042SDavid Gibson } 72853018216SPaolo Bonzini 729fb164994SDavid Gibson /* Advertise DFP (Decimal Floating Point) if available 7307db8a127SAlexey Kardashevskiy * 0 / no property == no DFP 7317db8a127SAlexey Kardashevskiy * 1 == DFP available */ 7327db8a127SAlexey Kardashevskiy if (spapr_get_cap(spapr, SPAPR_CAP_DFP) != 0) { 7337db8a127SAlexey Kardashevskiy _FDT((fdt_setprop_cell(fdt, offset, "ibm,dfp", 1))); 73453018216SPaolo Bonzini } 7357db8a127SAlexey Kardashevskiy 7367db8a127SAlexey Kardashevskiy page_sizes_prop_size = ppc_create_page_sizes_prop(cpu, page_sizes_prop, 7377db8a127SAlexey Kardashevskiy sizeof(page_sizes_prop)); 738fb164994SDavid Gibson if (page_sizes_prop_size) { 7397db8a127SAlexey Kardashevskiy _FDT((fdt_setprop(fdt, offset, "ibm,segment-page-sizes", 74053018216SPaolo Bonzini page_sizes_prop, page_sizes_prop_size))); 74153018216SPaolo Bonzini } 7427db8a127SAlexey Kardashevskiy 74391335a5eSDavid Gibson spapr_dt_pa_features(spapr, cpu, fdt, offset); 74453018216SPaolo Bonzini 7457db8a127SAlexey Kardashevskiy 
_FDT((fdt_setprop_cell(fdt, offset, "ibm,chip-id", 7467db8a127SAlexey Kardashevskiy cs->cpu_index / vcpus_per_socket))); 7477db8a127SAlexey Kardashevskiy 74853018216SPaolo Bonzini _FDT((fdt_setprop(fdt, offset, "ibm,pft-size", 749fb164994SDavid Gibson pft_size_prop, sizeof(pft_size_prop)))); 7505fe269b1SPaul Mackerras 7515fe269b1SPaul Mackerras if (ms->numa_state->num_nodes > 1) { 7528f86a408SDaniel Henrique Barboza _FDT(spapr_numa_fixup_cpu_dt(spapr, fdt, offset, cpu)); 7535fe269b1SPaul Mackerras } 7545fe269b1SPaul Mackerras 7557db8a127SAlexey Kardashevskiy _FDT(spapr_fixup_cpu_smt_dt(fdt, offset, cpu, compat_smt)); 7567db8a127SAlexey Kardashevskiy 7577db8a127SAlexey Kardashevskiy if (pcc->radix_page_info) { 7587db8a127SAlexey Kardashevskiy for (i = 0; i < pcc->radix_page_info->count; i++) { 7597db8a127SAlexey Kardashevskiy radix_AP_encodings[i] = 7607db8a127SAlexey Kardashevskiy cpu_to_be32(pcc->radix_page_info->entries[i]); 7616010818cSAlexey Kardashevskiy } 7626010818cSAlexey Kardashevskiy _FDT((fdt_setprop(fdt, offset, "ibm,processor-radix-AP-encodings", 7636010818cSAlexey Kardashevskiy radix_AP_encodings, 7646010818cSAlexey Kardashevskiy pcc->radix_page_info->count * 7656010818cSAlexey Kardashevskiy sizeof(radix_AP_encodings[0])))); 7666010818cSAlexey Kardashevskiy } 7676010818cSAlexey Kardashevskiy 7686010818cSAlexey Kardashevskiy /* 7696010818cSAlexey Kardashevskiy * We set this property to let the guest know that it can use the large 7706010818cSAlexey Kardashevskiy * decrementer and its width in bits. 
7716010818cSAlexey Kardashevskiy */ 7726010818cSAlexey Kardashevskiy if (spapr_get_cap(spapr, SPAPR_CAP_LARGE_DECREMENTER) != SPAPR_CAP_OFF) 77353018216SPaolo Bonzini _FDT((fdt_setprop_u32(fdt, offset, "ibm,dec-bits", 77453018216SPaolo Bonzini pcc->lrg_decr_bits))); 77553018216SPaolo Bonzini } 77653018216SPaolo Bonzini 77791335a5eSDavid Gibson static void spapr_dt_cpus(void *fdt, SpaprMachineState *spapr) 77853018216SPaolo Bonzini { 77953018216SPaolo Bonzini CPUState **rev; 78053018216SPaolo Bonzini CPUState *cs; 78153018216SPaolo Bonzini int n_cpus; 78253018216SPaolo Bonzini int cpus_offset; 78353018216SPaolo Bonzini char *nodename; 78453018216SPaolo Bonzini int i; 78553018216SPaolo Bonzini 78653018216SPaolo Bonzini cpus_offset = fdt_add_subnode(fdt, 0, "cpus"); 78753018216SPaolo Bonzini _FDT(cpus_offset); 78853018216SPaolo Bonzini _FDT((fdt_setprop_cell(fdt, cpus_offset, "#address-cells", 0x1))); 78953018216SPaolo Bonzini _FDT((fdt_setprop_cell(fdt, cpus_offset, "#size-cells", 0x0))); 79053018216SPaolo Bonzini 79153018216SPaolo Bonzini /* 79253018216SPaolo Bonzini * We walk the CPUs in reverse order to ensure that CPU DT nodes 79353018216SPaolo Bonzini * created by fdt_add_subnode() end up in the right order in FDT 79453018216SPaolo Bonzini * for the guest kernel the enumerate the CPUs correctly. 79553018216SPaolo Bonzini * 79653018216SPaolo Bonzini * The CPU list cannot be traversed in reverse order, so we need 79753018216SPaolo Bonzini * to do extra work. 
/*
 * Advertise the paravirtualized RNG under /ibm,platform-facilities.
 * Returns 0 on success, -1 on any libfdt failure.
 */
static int spapr_dt_rng(void *fdt)
{
    int ret;
    int node;

    /* Container node, addressed by path */
    node = qemu_fdt_add_subnode(fdt, "/ibm,platform-facilities");
    if (node <= 0) {
        return -1;
    }
    ret = fdt_setprop_string(fdt, node, "device_type",
                             "ibm,platform-facilities");
    ret |= fdt_setprop_cell(fdt, node, "#address-cells", 0x1);
    ret |= fdt_setprop_cell(fdt, node, "#size-cells", 0x0);

    /* The RNG facility itself; errors accumulate into ret */
    node = fdt_add_subnode(fdt, node, "ibm,random-v1");
    if (node <= 0) {
        return -1;
    }
    ret |= fdt_setprop_string(fdt, node, "compatible", "ibm,random");

    return ret ? -1 : 0;
}
B Rao 8410da6f3feSBharata B Rao node = fdt_add_subnode(fdt, node, "ibm,random-v1"); 8420da6f3feSBharata B Rao if (node <= 0) { 8430da6f3feSBharata B Rao return -1; 8440da6f3feSBharata B Rao } 8450da6f3feSBharata B Rao ret |= fdt_setprop_string(fdt, node, "compatible", "ibm,random"); 8460da6f3feSBharata B Rao 8470da6f3feSBharata B Rao return ret ? -1 : 0; 8480da6f3feSBharata B Rao } 8490da6f3feSBharata B Rao 850ce2918cbSDavid Gibson static void spapr_dt_rtas(SpaprMachineState *spapr, void *fdt) 8513f5dabceSDavid Gibson { 852fe6b6346SLike Xu MachineState *ms = MACHINE(spapr); 8533f5dabceSDavid Gibson int rtas; 8543f5dabceSDavid Gibson GString *hypertas = g_string_sized_new(256); 8553f5dabceSDavid Gibson GString *qemu_hypertas = g_string_sized_new(256); 8560c9269a5SDavid Hildenbrand uint64_t max_device_addr = MACHINE(spapr)->device_memory->base + 857b0c14ec4SDavid Hildenbrand memory_region_size(&MACHINE(spapr)->device_memory->mr); 8583f5dabceSDavid Gibson uint32_t lrdr_capacity[] = { 8590c9269a5SDavid Hildenbrand cpu_to_be32(max_device_addr >> 32), 8600c9269a5SDavid Hildenbrand cpu_to_be32(max_device_addr & 0xffffffff), 8617abf9797SAnton Blanchard cpu_to_be32(SPAPR_MEMORY_BLOCK_SIZE >> 32), 8627abf9797SAnton Blanchard cpu_to_be32(SPAPR_MEMORY_BLOCK_SIZE & 0xffffffff), 863fe6b6346SLike Xu cpu_to_be32(ms->smp.max_cpus / ms->smp.threads), 8643f5dabceSDavid Gibson }; 8653f5dabceSDavid Gibson 8663f5dabceSDavid Gibson _FDT(rtas = fdt_add_subnode(fdt, 0, "rtas")); 8673f5dabceSDavid Gibson 8683f5dabceSDavid Gibson /* hypertas */ 8693f5dabceSDavid Gibson add_str(hypertas, "hcall-pft"); 8703f5dabceSDavid Gibson add_str(hypertas, "hcall-term"); 8713f5dabceSDavid Gibson add_str(hypertas, "hcall-dabr"); 8723f5dabceSDavid Gibson add_str(hypertas, "hcall-interrupt"); 8733f5dabceSDavid Gibson add_str(hypertas, "hcall-tce"); 8743f5dabceSDavid Gibson add_str(hypertas, "hcall-vio"); 8753f5dabceSDavid Gibson add_str(hypertas, "hcall-splpar"); 87610741314SNicholas Piggin add_str(hypertas, 
"hcall-join"); 8773f5dabceSDavid Gibson add_str(hypertas, "hcall-bulk"); 8783f5dabceSDavid Gibson add_str(hypertas, "hcall-set-mode"); 8793f5dabceSDavid Gibson add_str(hypertas, "hcall-sprg0"); 8803f5dabceSDavid Gibson add_str(hypertas, "hcall-copy"); 8813f5dabceSDavid Gibson add_str(hypertas, "hcall-debug"); 882c24ba3d0SLaurent Vivier add_str(hypertas, "hcall-vphn"); 8833f5dabceSDavid Gibson add_str(qemu_hypertas, "hcall-memop1"); 8843f5dabceSDavid Gibson 8853f5dabceSDavid Gibson if (!kvm_enabled() || kvmppc_spapr_use_multitce()) { 8863f5dabceSDavid Gibson add_str(hypertas, "hcall-multi-tce"); 8873f5dabceSDavid Gibson } 88830f4b05bSDavid Gibson 88930f4b05bSDavid Gibson if (spapr->resize_hpt != SPAPR_RESIZE_HPT_DISABLED) { 89030f4b05bSDavid Gibson add_str(hypertas, "hcall-hpt-resize"); 89130f4b05bSDavid Gibson } 89230f4b05bSDavid Gibson 8933f5dabceSDavid Gibson _FDT(fdt_setprop(fdt, rtas, "ibm,hypertas-functions", 8943f5dabceSDavid Gibson hypertas->str, hypertas->len)); 8953f5dabceSDavid Gibson g_string_free(hypertas, TRUE); 8963f5dabceSDavid Gibson _FDT(fdt_setprop(fdt, rtas, "qemu,hypertas-functions", 8973f5dabceSDavid Gibson qemu_hypertas->str, qemu_hypertas->len)); 8983f5dabceSDavid Gibson g_string_free(qemu_hypertas, TRUE); 8993f5dabceSDavid Gibson 9001eee9950SDaniel Henrique Barboza spapr_numa_write_rtas_dt(spapr, fdt, rtas); 901da9f80fbSSerhii Popovych 9020e236d34SNicholas Piggin /* 9030e236d34SNicholas Piggin * FWNMI reserves RTAS_ERROR_LOG_MAX for the machine check error log, 9040e236d34SNicholas Piggin * and 16 bytes per CPU for system reset error log plus an extra 8 bytes. 
9050e236d34SNicholas Piggin * 9060e236d34SNicholas Piggin * The system reset requirements are driven by existing Linux and PowerVM 9070e236d34SNicholas Piggin * implementation which (contrary to PAPR) saves r3 in the error log 9080e236d34SNicholas Piggin * structure like machine check, so Linux expects to find the saved r3 9090e236d34SNicholas Piggin * value at the address in r3 upon FWNMI-enabled sreset interrupt (and 9100e236d34SNicholas Piggin * does not look at the error value). 9110e236d34SNicholas Piggin * 9120e236d34SNicholas Piggin * System reset interrupts are not subject to interlock like machine 9130e236d34SNicholas Piggin * check, so this memory area could be corrupted if the sreset is 9140e236d34SNicholas Piggin * interrupted by a machine check (or vice versa) if it was shared. To 9150e236d34SNicholas Piggin * prevent this, system reset uses per-CPU areas for the sreset save 9160e236d34SNicholas Piggin * area. A system reset that interrupts a system reset handler could 9170e236d34SNicholas Piggin * still overwrite this area, but Linux doesn't try to recover in that 9180e236d34SNicholas Piggin * case anyway. 9190e236d34SNicholas Piggin * 9200e236d34SNicholas Piggin * The extra 8 bytes is required because Linux's FWNMI error log check 9210e236d34SNicholas Piggin * is off-by-one. 
9220e236d34SNicholas Piggin */ 9230e236d34SNicholas Piggin _FDT(fdt_setprop_cell(fdt, rtas, "rtas-size", RTAS_ERROR_LOG_MAX + 9240e236d34SNicholas Piggin ms->smp.max_cpus * sizeof(uint64_t)*2 + sizeof(uint64_t))); 9253f5dabceSDavid Gibson _FDT(fdt_setprop_cell(fdt, rtas, "rtas-error-log-max", 9263f5dabceSDavid Gibson RTAS_ERROR_LOG_MAX)); 9273f5dabceSDavid Gibson _FDT(fdt_setprop_cell(fdt, rtas, "rtas-event-scan-rate", 9283f5dabceSDavid Gibson RTAS_EVENT_SCAN_RATE)); 9293f5dabceSDavid Gibson 9304f441474SDavid Gibson g_assert(msi_nonbroken); 9313f5dabceSDavid Gibson _FDT(fdt_setprop(fdt, rtas, "ibm,change-msix-capable", NULL, 0)); 9323f5dabceSDavid Gibson 9333f5dabceSDavid Gibson /* 9343f5dabceSDavid Gibson * According to PAPR, rtas ibm,os-term does not guarantee a return 9353f5dabceSDavid Gibson * back to the guest cpu. 9363f5dabceSDavid Gibson * 9373f5dabceSDavid Gibson * While an additional ibm,extended-os-term property indicates 9383f5dabceSDavid Gibson * that rtas call return will always occur. Set this property. 
9393f5dabceSDavid Gibson */ 9403f5dabceSDavid Gibson _FDT(fdt_setprop(fdt, rtas, "ibm,extended-os-term", NULL, 0)); 9413f5dabceSDavid Gibson 9423f5dabceSDavid Gibson _FDT(fdt_setprop(fdt, rtas, "ibm,lrdr-capacity", 9433f5dabceSDavid Gibson lrdr_capacity, sizeof(lrdr_capacity))); 9443f5dabceSDavid Gibson 9453f5dabceSDavid Gibson spapr_dt_rtas_tokens(fdt, rtas); 9463f5dabceSDavid Gibson } 9473f5dabceSDavid Gibson 948db592b5bSCédric Le Goater /* 949db592b5bSCédric Le Goater * Prepare ibm,arch-vec-5-platform-support, which indicates the MMU 950db592b5bSCédric Le Goater * and the XIVE features that the guest may request and thus the valid 951db592b5bSCédric Le Goater * values for bytes 23..26 of option vector 5: 952db592b5bSCédric Le Goater */ 953ce2918cbSDavid Gibson static void spapr_dt_ov5_platform_support(SpaprMachineState *spapr, void *fdt, 954db592b5bSCédric Le Goater int chosen) 9559fb4541fSSam Bobroff { 956545d6e2bSSuraj Jitindar Singh PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu); 957545d6e2bSSuraj Jitindar Singh 958f2b14e3aSCédric Le Goater char val[2 * 4] = { 959ca62823bSDavid Gibson 23, 0x00, /* XICS / XIVE mode */ 9609fb4541fSSam Bobroff 24, 0x00, /* Hash/Radix, filled in below. */ 9619fb4541fSSam Bobroff 25, 0x00, /* Hash options: Segment Tables == no, GTSE == no. */ 9629fb4541fSSam Bobroff 26, 0x40, /* Radix options: GTSE == yes. 
*/ 9639fb4541fSSam Bobroff }; 9649fb4541fSSam Bobroff 965ca62823bSDavid Gibson if (spapr->irq->xics && spapr->irq->xive) { 966ca62823bSDavid Gibson val[1] = SPAPR_OV5_XIVE_BOTH; 967ca62823bSDavid Gibson } else if (spapr->irq->xive) { 968ca62823bSDavid Gibson val[1] = SPAPR_OV5_XIVE_EXPLOIT; 969ca62823bSDavid Gibson } else { 970ca62823bSDavid Gibson assert(spapr->irq->xics); 971ca62823bSDavid Gibson val[1] = SPAPR_OV5_XIVE_LEGACY; 972ca62823bSDavid Gibson } 973ca62823bSDavid Gibson 9747abd43baSSuraj Jitindar Singh if (!ppc_check_compat(first_ppc_cpu, CPU_POWERPC_LOGICAL_3_00, 0, 9757abd43baSSuraj Jitindar Singh first_ppc_cpu->compat_pvr)) { 976db592b5bSCédric Le Goater /* 977db592b5bSCédric Le Goater * If we're in a pre POWER9 compat mode then the guest should 978db592b5bSCédric Le Goater * do hash and use the legacy interrupt mode 979db592b5bSCédric Le Goater */ 980ca62823bSDavid Gibson val[1] = SPAPR_OV5_XIVE_LEGACY; /* XICS */ 9817abd43baSSuraj Jitindar Singh val[3] = 0x00; /* Hash */ 9827abd43baSSuraj Jitindar Singh } else if (kvm_enabled()) { 9839fb4541fSSam Bobroff if (kvmppc_has_cap_mmu_radix() && kvmppc_has_cap_mmu_hash_v3()) { 984f2b14e3aSCédric Le Goater val[3] = 0x80; /* OV5_MMU_BOTH */ 9859fb4541fSSam Bobroff } else if (kvmppc_has_cap_mmu_radix()) { 986f2b14e3aSCédric Le Goater val[3] = 0x40; /* OV5_MMU_RADIX_300 */ 9879fb4541fSSam Bobroff } else { 988f2b14e3aSCédric Le Goater val[3] = 0x00; /* Hash */ 9899fb4541fSSam Bobroff } 9909fb4541fSSam Bobroff } else { 9917abd43baSSuraj Jitindar Singh /* V3 MMU supports both hash and radix in tcg (with dynamic switching) */ 992f2b14e3aSCédric Le Goater val[3] = 0xC0; 993545d6e2bSSuraj Jitindar Singh } 9949fb4541fSSam Bobroff _FDT(fdt_setprop(fdt, chosen, "ibm,arch-vec-5-platform-support", 9959fb4541fSSam Bobroff val, sizeof(val))); 9969fb4541fSSam Bobroff } 9979fb4541fSSam Bobroff 9981e0e1108SDavid Gibson static void spapr_dt_chosen(SpaprMachineState *spapr, void *fdt, bool reset) 9997c866c6aSDavid Gibson { 
10007c866c6aSDavid Gibson MachineState *machine = MACHINE(spapr); 10016c3829a2SAlexey Kardashevskiy SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine); 10027c866c6aSDavid Gibson int chosen; 10031e0e1108SDavid Gibson 10041e0e1108SDavid Gibson _FDT(chosen = fdt_add_subnode(fdt, 0, "chosen")); 10051e0e1108SDavid Gibson 10061e0e1108SDavid Gibson if (reset) { 10077c866c6aSDavid Gibson const char *boot_device = machine->boot_order; 10087c866c6aSDavid Gibson char *stdout_path = spapr_vio_stdout_path(spapr->vio_bus); 10097c866c6aSDavid Gibson size_t cb = 0; 1010907aac2fSMark Cave-Ayland char *bootlist = get_boot_devices_list(&cb); 10117c866c6aSDavid Gibson 10125ced7895SAlexey Kardashevskiy if (machine->kernel_cmdline && machine->kernel_cmdline[0]) { 10135ced7895SAlexey Kardashevskiy _FDT(fdt_setprop_string(fdt, chosen, "bootargs", 10145ced7895SAlexey Kardashevskiy machine->kernel_cmdline)); 10155ced7895SAlexey Kardashevskiy } 10161e0e1108SDavid Gibson 10175ced7895SAlexey Kardashevskiy if (spapr->initrd_size) { 10187c866c6aSDavid Gibson _FDT(fdt_setprop_cell(fdt, chosen, "linux,initrd-start", 10197c866c6aSDavid Gibson spapr->initrd_base)); 10207c866c6aSDavid Gibson _FDT(fdt_setprop_cell(fdt, chosen, "linux,initrd-end", 10217c866c6aSDavid Gibson spapr->initrd_base + spapr->initrd_size)); 10225ced7895SAlexey Kardashevskiy } 10237c866c6aSDavid Gibson 10247c866c6aSDavid Gibson if (spapr->kernel_size) { 102587262806SAlexey Kardashevskiy uint64_t kprop[2] = { cpu_to_be64(spapr->kernel_addr), 10267c866c6aSDavid Gibson cpu_to_be64(spapr->kernel_size) }; 10277c866c6aSDavid Gibson 10287c866c6aSDavid Gibson _FDT(fdt_setprop(fdt, chosen, "qemu,boot-kernel", 10297c866c6aSDavid Gibson &kprop, sizeof(kprop))); 10307c866c6aSDavid Gibson if (spapr->kernel_le) { 10317c866c6aSDavid Gibson _FDT(fdt_setprop(fdt, chosen, "qemu,boot-kernel-le", NULL, 0)); 10327c866c6aSDavid Gibson } 10337c866c6aSDavid Gibson } 10347c866c6aSDavid Gibson if (boot_menu) { 10357c866c6aSDavid Gibson 
_FDT((fdt_setprop_cell(fdt, chosen, "qemu,boot-menu", boot_menu))); 10367c866c6aSDavid Gibson } 10377c866c6aSDavid Gibson _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-width", graphic_width)); 10387c866c6aSDavid Gibson _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-height", graphic_height)); 10397c866c6aSDavid Gibson _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-depth", graphic_depth)); 10407c866c6aSDavid Gibson 10417c866c6aSDavid Gibson if (cb && bootlist) { 10427c866c6aSDavid Gibson int i; 10437c866c6aSDavid Gibson 10447c866c6aSDavid Gibson for (i = 0; i < cb; i++) { 10457c866c6aSDavid Gibson if (bootlist[i] == '\n') { 10467c866c6aSDavid Gibson bootlist[i] = ' '; 10477c866c6aSDavid Gibson } 10487c866c6aSDavid Gibson } 10497c866c6aSDavid Gibson _FDT(fdt_setprop_string(fdt, chosen, "qemu,boot-list", bootlist)); 10507c866c6aSDavid Gibson } 10517c866c6aSDavid Gibson 10527c866c6aSDavid Gibson if (boot_device && strlen(boot_device)) { 10537c866c6aSDavid Gibson _FDT(fdt_setprop_string(fdt, chosen, "qemu,boot-device", boot_device)); 10547c866c6aSDavid Gibson } 10557c866c6aSDavid Gibson 10567c866c6aSDavid Gibson if (!spapr->has_graphics && stdout_path) { 105790ee4e01SNikunj A Dadhania /* 10581e0e1108SDavid Gibson * "linux,stdout-path" and "stdout" properties are 10591e0e1108SDavid Gibson * deprecated by linux kernel. New platforms should only 10601e0e1108SDavid Gibson * use the "stdout-path" property. Set the new property 10611e0e1108SDavid Gibson * and continue using older property to remain compatible 10621e0e1108SDavid Gibson * with the existing firmware. 
106390ee4e01SNikunj A Dadhania */ 10647c866c6aSDavid Gibson _FDT(fdt_setprop_string(fdt, chosen, "linux,stdout-path", stdout_path)); 106590ee4e01SNikunj A Dadhania _FDT(fdt_setprop_string(fdt, chosen, "stdout-path", stdout_path)); 10667c866c6aSDavid Gibson } 10677c866c6aSDavid Gibson 10681e0e1108SDavid Gibson /* 10691e0e1108SDavid Gibson * We can deal with BAR reallocation just fine, advertise it 10701e0e1108SDavid Gibson * to the guest 10711e0e1108SDavid Gibson */ 10726c3829a2SAlexey Kardashevskiy if (smc->linux_pci_probe) { 10736c3829a2SAlexey Kardashevskiy _FDT(fdt_setprop_cell(fdt, chosen, "linux,pci-probe-only", 0)); 10746c3829a2SAlexey Kardashevskiy } 10756c3829a2SAlexey Kardashevskiy 1076db592b5bSCédric Le Goater spapr_dt_ov5_platform_support(spapr, fdt, chosen); 10779fb4541fSSam Bobroff 10787c866c6aSDavid Gibson g_free(stdout_path); 10797c866c6aSDavid Gibson g_free(bootlist); 10807c866c6aSDavid Gibson } 10817c866c6aSDavid Gibson 108291335a5eSDavid Gibson _FDT(spapr_dt_ovec(fdt, chosen, spapr->ov5_cas, "ibm,architecture-vec-5")); 10831e0e1108SDavid Gibson } 10841e0e1108SDavid Gibson 1085ce2918cbSDavid Gibson static void spapr_dt_hypervisor(SpaprMachineState *spapr, void *fdt) 1086fca5f2dcSDavid Gibson { 1087fca5f2dcSDavid Gibson /* The /hypervisor node isn't in PAPR - this is a hack to allow PR 1088fca5f2dcSDavid Gibson * KVM to work under pHyp with some guest co-operation */ 1089fca5f2dcSDavid Gibson int hypervisor; 1090fca5f2dcSDavid Gibson uint8_t hypercall[16]; 1091fca5f2dcSDavid Gibson 1092fca5f2dcSDavid Gibson _FDT(hypervisor = fdt_add_subnode(fdt, 0, "hypervisor")); 1093fca5f2dcSDavid Gibson /* indicate KVM hypercall interface */ 1094fca5f2dcSDavid Gibson _FDT(fdt_setprop_string(fdt, hypervisor, "compatible", "linux,kvm")); 1095fca5f2dcSDavid Gibson if (kvmppc_has_cap_fixup_hcalls()) { 1096fca5f2dcSDavid Gibson /* 1097fca5f2dcSDavid Gibson * Older KVM versions with older guest kernels were broken 1098fca5f2dcSDavid Gibson * with the magic page, don't 
allow the guest to map it. 1099fca5f2dcSDavid Gibson */ 1100fca5f2dcSDavid Gibson if (!kvmppc_get_hypercall(first_cpu->env_ptr, hypercall, 1101fca5f2dcSDavid Gibson sizeof(hypercall))) { 1102fca5f2dcSDavid Gibson _FDT(fdt_setprop(fdt, hypervisor, "hcall-instructions", 1103fca5f2dcSDavid Gibson hypercall, sizeof(hypercall))); 1104fca5f2dcSDavid Gibson } 1105fca5f2dcSDavid Gibson } 1106fca5f2dcSDavid Gibson } 1107fca5f2dcSDavid Gibson 11080c21e073SDavid Gibson void *spapr_build_fdt(SpaprMachineState *spapr, bool reset, size_t space) 110953018216SPaolo Bonzini { 1110c86c1affSDaniel Henrique Barboza MachineState *machine = MACHINE(spapr); 11113c0c47e3SDavid Gibson MachineClass *mc = MACHINE_GET_CLASS(machine); 1112ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine); 11137c866c6aSDavid Gibson int ret; 111453018216SPaolo Bonzini void *fdt; 1115ce2918cbSDavid Gibson SpaprPhbState *phb; 1116398a0bd5SDavid Gibson char *buf; 111753018216SPaolo Bonzini 111897b32a6aSDavid Gibson fdt = g_malloc0(space); 111997b32a6aSDavid Gibson _FDT((fdt_create_empty_tree(fdt, space))); 112053018216SPaolo Bonzini 1121398a0bd5SDavid Gibson /* Root node */ 1122398a0bd5SDavid Gibson _FDT(fdt_setprop_string(fdt, 0, "device_type", "chrp")); 1123398a0bd5SDavid Gibson _FDT(fdt_setprop_string(fdt, 0, "model", "IBM pSeries (emulated by qemu)")); 1124398a0bd5SDavid Gibson _FDT(fdt_setprop_string(fdt, 0, "compatible", "qemu,pseries")); 1125398a0bd5SDavid Gibson 11260a794529SDavid Gibson /* Guest UUID & Name*/ 1127398a0bd5SDavid Gibson buf = qemu_uuid_unparse_strdup(&qemu_uuid); 1128398a0bd5SDavid Gibson _FDT(fdt_setprop_string(fdt, 0, "vm,uuid", buf)); 1129398a0bd5SDavid Gibson if (qemu_uuid_set) { 1130398a0bd5SDavid Gibson _FDT(fdt_setprop_string(fdt, 0, "system-id", buf)); 1131398a0bd5SDavid Gibson } 1132398a0bd5SDavid Gibson g_free(buf); 1133398a0bd5SDavid Gibson 1134398a0bd5SDavid Gibson if (qemu_get_vm_name()) { 1135398a0bd5SDavid Gibson _FDT(fdt_setprop_string(fdt, 0, 
"ibm,partition-name", 1136398a0bd5SDavid Gibson qemu_get_vm_name())); 1137398a0bd5SDavid Gibson } 1138398a0bd5SDavid Gibson 11390a794529SDavid Gibson /* Host Model & Serial Number */ 11400a794529SDavid Gibson if (spapr->host_model) { 11410a794529SDavid Gibson _FDT(fdt_setprop_string(fdt, 0, "host-model", spapr->host_model)); 11420a794529SDavid Gibson } else if (smc->broken_host_serial_model && kvmppc_get_host_model(&buf)) { 11430a794529SDavid Gibson _FDT(fdt_setprop_string(fdt, 0, "host-model", buf)); 11440a794529SDavid Gibson g_free(buf); 11450a794529SDavid Gibson } 11460a794529SDavid Gibson 11470a794529SDavid Gibson if (spapr->host_serial) { 11480a794529SDavid Gibson _FDT(fdt_setprop_string(fdt, 0, "host-serial", spapr->host_serial)); 11490a794529SDavid Gibson } else if (smc->broken_host_serial_model && kvmppc_get_host_serial(&buf)) { 11500a794529SDavid Gibson _FDT(fdt_setprop_string(fdt, 0, "host-serial", buf)); 11510a794529SDavid Gibson g_free(buf); 11520a794529SDavid Gibson } 11530a794529SDavid Gibson 1154398a0bd5SDavid Gibson _FDT(fdt_setprop_cell(fdt, 0, "#address-cells", 2)); 1155398a0bd5SDavid Gibson _FDT(fdt_setprop_cell(fdt, 0, "#size-cells", 2)); 115653018216SPaolo Bonzini 1157fc7e0765SDavid Gibson /* /interrupt controller */ 115805289273SDavid Gibson spapr_irq_dt(spapr, spapr_max_server_number(spapr), fdt, PHANDLE_INTC); 1159fc7e0765SDavid Gibson 116091335a5eSDavid Gibson ret = spapr_dt_memory(spapr, fdt); 1161e8f986fcSBharata B Rao if (ret < 0) { 1162ce9863b7SCédric Le Goater error_report("couldn't setup memory nodes in fdt"); 1163e8f986fcSBharata B Rao exit(1); 116453018216SPaolo Bonzini } 116553018216SPaolo Bonzini 1166bf5a6696SDavid Gibson /* /vdevice */ 1167bf5a6696SDavid Gibson spapr_dt_vdevice(spapr->vio_bus, fdt); 116853018216SPaolo Bonzini 11694d9392beSThomas Huth if (object_resolve_path_type("", TYPE_SPAPR_RNG, NULL)) { 117091335a5eSDavid Gibson ret = spapr_dt_rng(fdt); 11714d9392beSThomas Huth if (ret < 0) { 1172ce9863b7SCédric Le Goater 
error_report("could not set up rng device in the fdt"); 11734d9392beSThomas Huth exit(1); 11744d9392beSThomas Huth } 11754d9392beSThomas Huth } 11764d9392beSThomas Huth 117753018216SPaolo Bonzini QLIST_FOREACH(phb, &spapr->phbs, list) { 11788cbe71ecSDavid Gibson ret = spapr_dt_phb(spapr, phb, PHANDLE_INTC, fdt, NULL); 117953018216SPaolo Bonzini if (ret < 0) { 1180da34fed7SThomas Huth error_report("couldn't setup PCI devices in fdt"); 118153018216SPaolo Bonzini exit(1); 118253018216SPaolo Bonzini } 1183da34fed7SThomas Huth } 118453018216SPaolo Bonzini 118591335a5eSDavid Gibson spapr_dt_cpus(fdt, spapr); 118653018216SPaolo Bonzini 1187c20d332aSBharata B Rao if (smc->dr_lmb_enabled) { 11889e7d38e8SDavid Gibson _FDT(spapr_dt_drc(fdt, 0, NULL, SPAPR_DR_CONNECTOR_TYPE_LMB)); 1189c20d332aSBharata B Rao } 1190c20d332aSBharata B Rao 1191c5514d0eSIgor Mammedov if (mc->has_hotpluggable_cpus) { 1192af81cf32SBharata B Rao int offset = fdt_path_offset(fdt, "/cpus"); 11939e7d38e8SDavid Gibson ret = spapr_dt_drc(fdt, offset, NULL, SPAPR_DR_CONNECTOR_TYPE_CPU); 1194af81cf32SBharata B Rao if (ret < 0) { 1195af81cf32SBharata B Rao error_report("Couldn't set up CPU DR device tree properties"); 1196af81cf32SBharata B Rao exit(1); 1197af81cf32SBharata B Rao } 1198af81cf32SBharata B Rao } 1199af81cf32SBharata B Rao 1200ffb1e275SDavid Gibson /* /event-sources */ 1201ffbb1705SMichael Roth spapr_dt_events(spapr, fdt); 1202ffb1e275SDavid Gibson 12033f5dabceSDavid Gibson /* /rtas */ 12043f5dabceSDavid Gibson spapr_dt_rtas(spapr, fdt); 12053f5dabceSDavid Gibson 12067c866c6aSDavid Gibson /* /chosen */ 12071e0e1108SDavid Gibson spapr_dt_chosen(spapr, fdt, reset); 1208cf6e5223SDavid Gibson 1209fca5f2dcSDavid Gibson /* /hypervisor */ 1210fca5f2dcSDavid Gibson if (kvm_enabled()) { 1211fca5f2dcSDavid Gibson spapr_dt_hypervisor(spapr, fdt); 1212fca5f2dcSDavid Gibson } 1213fca5f2dcSDavid Gibson 1214cf6e5223SDavid Gibson /* Build memory reserve map */ 1215a49f62b9SAlexey Kardashevskiy if (reset) { 
1216cf6e5223SDavid Gibson if (spapr->kernel_size) { 121787262806SAlexey Kardashevskiy _FDT((fdt_add_mem_rsv(fdt, spapr->kernel_addr, 121887262806SAlexey Kardashevskiy spapr->kernel_size))); 1219cf6e5223SDavid Gibson } 1220cf6e5223SDavid Gibson if (spapr->initrd_size) { 1221a49f62b9SAlexey Kardashevskiy _FDT((fdt_add_mem_rsv(fdt, spapr->initrd_base, 1222a49f62b9SAlexey Kardashevskiy spapr->initrd_size))); 1223a49f62b9SAlexey Kardashevskiy } 1224cf6e5223SDavid Gibson } 1225cf6e5223SDavid Gibson 12263998ccd0SNathan Fontenot if (smc->dr_phb_enabled) { 12279e7d38e8SDavid Gibson ret = spapr_dt_drc(fdt, 0, NULL, SPAPR_DR_CONNECTOR_TYPE_PHB); 12283998ccd0SNathan Fontenot if (ret < 0) { 12293998ccd0SNathan Fontenot error_report("Couldn't set up PHB DR device tree properties"); 12303998ccd0SNathan Fontenot exit(1); 12313998ccd0SNathan Fontenot } 12323998ccd0SNathan Fontenot } 12333998ccd0SNathan Fontenot 1234ee3a71e3SShivaprasad G Bhat /* NVDIMM devices */ 1235ee3a71e3SShivaprasad G Bhat if (mc->nvdimm_supported) { 1236f1aa45ffSDaniel Henrique Barboza spapr_dt_persistent_memory(spapr, fdt); 1237ee3a71e3SShivaprasad G Bhat } 1238ee3a71e3SShivaprasad G Bhat 1239997b6cfcSDavid Gibson return fdt; 124053018216SPaolo Bonzini } 124153018216SPaolo Bonzini 124253018216SPaolo Bonzini static uint64_t translate_kernel_address(void *opaque, uint64_t addr) 124353018216SPaolo Bonzini { 124487262806SAlexey Kardashevskiy SpaprMachineState *spapr = opaque; 124587262806SAlexey Kardashevskiy 124687262806SAlexey Kardashevskiy return (addr & 0x0fffffff) + spapr->kernel_addr; 124753018216SPaolo Bonzini } 124853018216SPaolo Bonzini 12491d1be34dSDavid Gibson static void emulate_spapr_hypercall(PPCVirtualHypervisor *vhyp, 12501d1be34dSDavid Gibson PowerPCCPU *cpu) 125153018216SPaolo Bonzini { 125253018216SPaolo Bonzini CPUPPCState *env = &cpu->env; 125353018216SPaolo Bonzini 12548d04fb55SJan Kiszka /* The TCG path should also be holding the BQL at this point */ 12558d04fb55SJan Kiszka 
g_assert(qemu_mutex_iothread_locked()); 12568d04fb55SJan Kiszka 125753018216SPaolo Bonzini if (msr_pr) { 125853018216SPaolo Bonzini hcall_dprintf("Hypercall made with MSR[PR]=1\n"); 125953018216SPaolo Bonzini env->gpr[3] = H_PRIVILEGE; 126053018216SPaolo Bonzini } else { 126153018216SPaolo Bonzini env->gpr[3] = spapr_hypercall(cpu, env->gpr[3], &env->gpr[4]); 126253018216SPaolo Bonzini } 126353018216SPaolo Bonzini } 126453018216SPaolo Bonzini 126500fd075eSBenjamin Herrenschmidt struct LPCRSyncState { 126600fd075eSBenjamin Herrenschmidt target_ulong value; 126700fd075eSBenjamin Herrenschmidt target_ulong mask; 126800fd075eSBenjamin Herrenschmidt }; 126900fd075eSBenjamin Herrenschmidt 127000fd075eSBenjamin Herrenschmidt static void do_lpcr_sync(CPUState *cs, run_on_cpu_data arg) 127100fd075eSBenjamin Herrenschmidt { 127200fd075eSBenjamin Herrenschmidt struct LPCRSyncState *s = arg.host_ptr; 127300fd075eSBenjamin Herrenschmidt PowerPCCPU *cpu = POWERPC_CPU(cs); 127400fd075eSBenjamin Herrenschmidt CPUPPCState *env = &cpu->env; 127500fd075eSBenjamin Herrenschmidt target_ulong lpcr; 127600fd075eSBenjamin Herrenschmidt 127700fd075eSBenjamin Herrenschmidt cpu_synchronize_state(cs); 127800fd075eSBenjamin Herrenschmidt lpcr = env->spr[SPR_LPCR]; 127900fd075eSBenjamin Herrenschmidt lpcr &= ~s->mask; 128000fd075eSBenjamin Herrenschmidt lpcr |= s->value; 128100fd075eSBenjamin Herrenschmidt ppc_store_lpcr(cpu, lpcr); 128200fd075eSBenjamin Herrenschmidt } 128300fd075eSBenjamin Herrenschmidt 128400fd075eSBenjamin Herrenschmidt void spapr_set_all_lpcrs(target_ulong value, target_ulong mask) 128500fd075eSBenjamin Herrenschmidt { 128600fd075eSBenjamin Herrenschmidt CPUState *cs; 128700fd075eSBenjamin Herrenschmidt struct LPCRSyncState s = { 128800fd075eSBenjamin Herrenschmidt .value = value, 128900fd075eSBenjamin Herrenschmidt .mask = mask 129000fd075eSBenjamin Herrenschmidt }; 129100fd075eSBenjamin Herrenschmidt CPU_FOREACH(cs) { 129200fd075eSBenjamin Herrenschmidt run_on_cpu(cs, 
do_lpcr_sync, RUN_ON_CPU_HOST_PTR(&s)); 129300fd075eSBenjamin Herrenschmidt } 129400fd075eSBenjamin Herrenschmidt } 129500fd075eSBenjamin Herrenschmidt 129679825f4dSBenjamin Herrenschmidt static void spapr_get_pate(PPCVirtualHypervisor *vhyp, ppc_v3_pate_t *entry) 12979861bb3eSSuraj Jitindar Singh { 1298ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(vhyp); 12999861bb3eSSuraj Jitindar Singh 130079825f4dSBenjamin Herrenschmidt /* Copy PATE1:GR into PATE0:HR */ 130179825f4dSBenjamin Herrenschmidt entry->dw0 = spapr->patb_entry & PATE0_HR; 130279825f4dSBenjamin Herrenschmidt entry->dw1 = spapr->patb_entry; 13039861bb3eSSuraj Jitindar Singh } 13049861bb3eSSuraj Jitindar Singh 1305e6b8fd24SSamuel Mendoza-Jonas #define HPTE(_table, _i) (void *)(((uint64_t *)(_table)) + ((_i) * 2)) 1306e6b8fd24SSamuel Mendoza-Jonas #define HPTE_VALID(_hpte) (tswap64(*((uint64_t *)(_hpte))) & HPTE64_V_VALID) 1307e6b8fd24SSamuel Mendoza-Jonas #define HPTE_DIRTY(_hpte) (tswap64(*((uint64_t *)(_hpte))) & HPTE64_V_HPTE_DIRTY) 1308e6b8fd24SSamuel Mendoza-Jonas #define CLEAN_HPTE(_hpte) ((*(uint64_t *)(_hpte)) &= tswap64(~HPTE64_V_HPTE_DIRTY)) 1309e6b8fd24SSamuel Mendoza-Jonas #define DIRTY_HPTE(_hpte) ((*(uint64_t *)(_hpte)) |= tswap64(HPTE64_V_HPTE_DIRTY)) 1310e6b8fd24SSamuel Mendoza-Jonas 1311715c5407SDavid Gibson /* 1312715c5407SDavid Gibson * Get the fd to access the kernel htab, re-opening it if necessary 1313715c5407SDavid Gibson */ 1314ce2918cbSDavid Gibson static int get_htab_fd(SpaprMachineState *spapr) 1315715c5407SDavid Gibson { 131614b0d748SGreg Kurz Error *local_err = NULL; 131714b0d748SGreg Kurz 1318715c5407SDavid Gibson if (spapr->htab_fd >= 0) { 1319715c5407SDavid Gibson return spapr->htab_fd; 1320715c5407SDavid Gibson } 1321715c5407SDavid Gibson 132214b0d748SGreg Kurz spapr->htab_fd = kvmppc_get_htab_fd(false, 0, &local_err); 1323715c5407SDavid Gibson if (spapr->htab_fd < 0) { 132414b0d748SGreg Kurz error_report_err(local_err); 1325715c5407SDavid Gibson } 
1326715c5407SDavid Gibson 1327715c5407SDavid Gibson return spapr->htab_fd; 1328715c5407SDavid Gibson } 1329715c5407SDavid Gibson 1330ce2918cbSDavid Gibson void close_htab_fd(SpaprMachineState *spapr) 1331715c5407SDavid Gibson { 1332715c5407SDavid Gibson if (spapr->htab_fd >= 0) { 1333715c5407SDavid Gibson close(spapr->htab_fd); 1334715c5407SDavid Gibson } 1335715c5407SDavid Gibson spapr->htab_fd = -1; 1336715c5407SDavid Gibson } 1337715c5407SDavid Gibson 1338e57ca75cSDavid Gibson static hwaddr spapr_hpt_mask(PPCVirtualHypervisor *vhyp) 1339e57ca75cSDavid Gibson { 1340ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(vhyp); 1341e57ca75cSDavid Gibson 1342e57ca75cSDavid Gibson return HTAB_SIZE(spapr) / HASH_PTEG_SIZE_64 - 1; 1343e57ca75cSDavid Gibson } 1344e57ca75cSDavid Gibson 13451ec26c75SGreg Kurz static target_ulong spapr_encode_hpt_for_kvm_pr(PPCVirtualHypervisor *vhyp) 13461ec26c75SGreg Kurz { 1347ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(vhyp); 13481ec26c75SGreg Kurz 13491ec26c75SGreg Kurz assert(kvm_enabled()); 13501ec26c75SGreg Kurz 13511ec26c75SGreg Kurz if (!spapr->htab) { 13521ec26c75SGreg Kurz return 0; 13531ec26c75SGreg Kurz } 13541ec26c75SGreg Kurz 13551ec26c75SGreg Kurz return (target_ulong)(uintptr_t)spapr->htab | (spapr->htab_shift - 18); 13561ec26c75SGreg Kurz } 13571ec26c75SGreg Kurz 1358e57ca75cSDavid Gibson static const ppc_hash_pte64_t *spapr_map_hptes(PPCVirtualHypervisor *vhyp, 1359e57ca75cSDavid Gibson hwaddr ptex, int n) 1360e57ca75cSDavid Gibson { 1361ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(vhyp); 1362e57ca75cSDavid Gibson hwaddr pte_offset = ptex * HASH_PTE_SIZE_64; 1363e57ca75cSDavid Gibson 1364e57ca75cSDavid Gibson if (!spapr->htab) { 1365e57ca75cSDavid Gibson /* 1366e57ca75cSDavid Gibson * HTAB is controlled by KVM. 
Fetch into temporary buffer 1367e57ca75cSDavid Gibson */ 1368e57ca75cSDavid Gibson ppc_hash_pte64_t *hptes = g_malloc(n * HASH_PTE_SIZE_64); 1369e57ca75cSDavid Gibson kvmppc_read_hptes(hptes, ptex, n); 1370e57ca75cSDavid Gibson return hptes; 1371e57ca75cSDavid Gibson } 1372e57ca75cSDavid Gibson 1373e57ca75cSDavid Gibson /* 1374e57ca75cSDavid Gibson * HTAB is controlled by QEMU. Just point to the internally 1375e57ca75cSDavid Gibson * accessible PTEG. 1376e57ca75cSDavid Gibson */ 1377e57ca75cSDavid Gibson return (const ppc_hash_pte64_t *)(spapr->htab + pte_offset); 1378e57ca75cSDavid Gibson } 1379e57ca75cSDavid Gibson 1380e57ca75cSDavid Gibson static void spapr_unmap_hptes(PPCVirtualHypervisor *vhyp, 1381e57ca75cSDavid Gibson const ppc_hash_pte64_t *hptes, 1382e57ca75cSDavid Gibson hwaddr ptex, int n) 1383e57ca75cSDavid Gibson { 1384ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(vhyp); 1385e57ca75cSDavid Gibson 1386e57ca75cSDavid Gibson if (!spapr->htab) { 1387e57ca75cSDavid Gibson g_free((void *)hptes); 1388e57ca75cSDavid Gibson } 1389e57ca75cSDavid Gibson 1390e57ca75cSDavid Gibson /* Nothing to do for qemu managed HPT */ 1391e57ca75cSDavid Gibson } 1392e57ca75cSDavid Gibson 1393a2dd4e83SBenjamin Herrenschmidt void spapr_store_hpte(PowerPCCPU *cpu, hwaddr ptex, 1394e57ca75cSDavid Gibson uint64_t pte0, uint64_t pte1) 1395e57ca75cSDavid Gibson { 1396a2dd4e83SBenjamin Herrenschmidt SpaprMachineState *spapr = SPAPR_MACHINE(cpu->vhyp); 1397e57ca75cSDavid Gibson hwaddr offset = ptex * HASH_PTE_SIZE_64; 1398e57ca75cSDavid Gibson 1399e57ca75cSDavid Gibson if (!spapr->htab) { 1400e57ca75cSDavid Gibson kvmppc_write_hpte(ptex, pte0, pte1); 1401e57ca75cSDavid Gibson } else { 14023054b0caSBenjamin Herrenschmidt if (pte0 & HPTE64_V_VALID) { 1403e57ca75cSDavid Gibson stq_p(spapr->htab + offset + HASH_PTE_SIZE_64 / 2, pte1); 14043054b0caSBenjamin Herrenschmidt /* 14053054b0caSBenjamin Herrenschmidt * When setting valid, we write PTE1 first. 
This ensures 14063054b0caSBenjamin Herrenschmidt * proper synchronization with the reading code in 14073054b0caSBenjamin Herrenschmidt * ppc_hash64_pteg_search() 14083054b0caSBenjamin Herrenschmidt */ 14093054b0caSBenjamin Herrenschmidt smp_wmb(); 14103054b0caSBenjamin Herrenschmidt stq_p(spapr->htab + offset, pte0); 14113054b0caSBenjamin Herrenschmidt } else { 14123054b0caSBenjamin Herrenschmidt stq_p(spapr->htab + offset, pte0); 14133054b0caSBenjamin Herrenschmidt /* 14143054b0caSBenjamin Herrenschmidt * When clearing it we set PTE0 first. This ensures proper 14153054b0caSBenjamin Herrenschmidt * synchronization with the reading code in 14163054b0caSBenjamin Herrenschmidt * ppc_hash64_pteg_search() 14173054b0caSBenjamin Herrenschmidt */ 14183054b0caSBenjamin Herrenschmidt smp_wmb(); 14193054b0caSBenjamin Herrenschmidt stq_p(spapr->htab + offset + HASH_PTE_SIZE_64 / 2, pte1); 14203054b0caSBenjamin Herrenschmidt } 1421e57ca75cSDavid Gibson } 1422e57ca75cSDavid Gibson } 1423e57ca75cSDavid Gibson 1424a2dd4e83SBenjamin Herrenschmidt static void spapr_hpte_set_c(PPCVirtualHypervisor *vhyp, hwaddr ptex, 1425a2dd4e83SBenjamin Herrenschmidt uint64_t pte1) 1426a2dd4e83SBenjamin Herrenschmidt { 1427a2dd4e83SBenjamin Herrenschmidt hwaddr offset = ptex * HASH_PTE_SIZE_64 + 15; 1428a2dd4e83SBenjamin Herrenschmidt SpaprMachineState *spapr = SPAPR_MACHINE(vhyp); 1429a2dd4e83SBenjamin Herrenschmidt 1430a2dd4e83SBenjamin Herrenschmidt if (!spapr->htab) { 1431a2dd4e83SBenjamin Herrenschmidt /* There should always be a hash table when this is called */ 1432a2dd4e83SBenjamin Herrenschmidt error_report("spapr_hpte_set_c called with no hash table !"); 1433a2dd4e83SBenjamin Herrenschmidt return; 1434a2dd4e83SBenjamin Herrenschmidt } 1435a2dd4e83SBenjamin Herrenschmidt 1436a2dd4e83SBenjamin Herrenschmidt /* The HW performs a non-atomic byte update */ 1437a2dd4e83SBenjamin Herrenschmidt stb_p(spapr->htab + offset, (pte1 & 0xff) | 0x80); 1438a2dd4e83SBenjamin Herrenschmidt } 
1439a2dd4e83SBenjamin Herrenschmidt 1440a2dd4e83SBenjamin Herrenschmidt static void spapr_hpte_set_r(PPCVirtualHypervisor *vhyp, hwaddr ptex, 1441a2dd4e83SBenjamin Herrenschmidt uint64_t pte1) 1442a2dd4e83SBenjamin Herrenschmidt { 1443a2dd4e83SBenjamin Herrenschmidt hwaddr offset = ptex * HASH_PTE_SIZE_64 + 14; 1444a2dd4e83SBenjamin Herrenschmidt SpaprMachineState *spapr = SPAPR_MACHINE(vhyp); 1445a2dd4e83SBenjamin Herrenschmidt 1446a2dd4e83SBenjamin Herrenschmidt if (!spapr->htab) { 1447a2dd4e83SBenjamin Herrenschmidt /* There should always be a hash table when this is called */ 1448a2dd4e83SBenjamin Herrenschmidt error_report("spapr_hpte_set_r called with no hash table !"); 1449a2dd4e83SBenjamin Herrenschmidt return; 1450a2dd4e83SBenjamin Herrenschmidt } 1451a2dd4e83SBenjamin Herrenschmidt 1452a2dd4e83SBenjamin Herrenschmidt /* The HW performs a non-atomic byte update */ 1453a2dd4e83SBenjamin Herrenschmidt stb_p(spapr->htab + offset, ((pte1 >> 8) & 0xff) | 0x01); 1454a2dd4e83SBenjamin Herrenschmidt } 1455a2dd4e83SBenjamin Herrenschmidt 14560b0b8310SDavid Gibson int spapr_hpt_shift_for_ramsize(uint64_t ramsize) 14578dfe8e7fSDavid Gibson { 14588dfe8e7fSDavid Gibson int shift; 14598dfe8e7fSDavid Gibson 14608dfe8e7fSDavid Gibson /* We aim for a hash table of size 1/128 the size of RAM (rounded 14618dfe8e7fSDavid Gibson * up). 
The PAPR recommendation is actually 1/64 of RAM size, but 14628dfe8e7fSDavid Gibson * that's much more than is needed for Linux guests */ 14638dfe8e7fSDavid Gibson shift = ctz64(pow2ceil(ramsize)) - 7; 14648dfe8e7fSDavid Gibson shift = MAX(shift, 18); /* Minimum architected size */ 14658dfe8e7fSDavid Gibson shift = MIN(shift, 46); /* Maximum architected size */ 14668dfe8e7fSDavid Gibson return shift; 14678dfe8e7fSDavid Gibson } 14688dfe8e7fSDavid Gibson 1469ce2918cbSDavid Gibson void spapr_free_hpt(SpaprMachineState *spapr) 147006ec79e8SBharata B Rao { 147106ec79e8SBharata B Rao g_free(spapr->htab); 147206ec79e8SBharata B Rao spapr->htab = NULL; 147306ec79e8SBharata B Rao spapr->htab_shift = 0; 147406ec79e8SBharata B Rao close_htab_fd(spapr); 147506ec79e8SBharata B Rao } 147606ec79e8SBharata B Rao 1477ce2918cbSDavid Gibson void spapr_reallocate_hpt(SpaprMachineState *spapr, int shift, 1478c5f54f3eSDavid Gibson Error **errp) 147953018216SPaolo Bonzini { 1480c5f54f3eSDavid Gibson long rc; 148153018216SPaolo Bonzini 1482c5f54f3eSDavid Gibson /* Clean up any HPT info from a previous boot */ 148306ec79e8SBharata B Rao spapr_free_hpt(spapr); 148453018216SPaolo Bonzini 1485c5f54f3eSDavid Gibson rc = kvmppc_reset_htab(shift); 1486f0638a0bSFabiano Rosas 1487f0638a0bSFabiano Rosas if (rc == -EOPNOTSUPP) { 1488f0638a0bSFabiano Rosas error_setg(errp, "HPT not supported in nested guests"); 1489f0638a0bSFabiano Rosas return; 1490f0638a0bSFabiano Rosas } 1491f0638a0bSFabiano Rosas 1492c5f54f3eSDavid Gibson if (rc < 0) { 1493c5f54f3eSDavid Gibson /* kernel-side HPT needed, but couldn't allocate one */ 1494c5f54f3eSDavid Gibson error_setg_errno(errp, errno, 1495c5f54f3eSDavid Gibson "Failed to allocate KVM HPT of order %d (try smaller maxmem?)", 1496c5f54f3eSDavid Gibson shift); 1497c5f54f3eSDavid Gibson /* This is almost certainly fatal, but if the caller really 1498c5f54f3eSDavid Gibson * wants to carry on with shift == 0, it's welcome to try */ 1499c5f54f3eSDavid Gibson } else 
if (rc > 0) { 1500c5f54f3eSDavid Gibson /* kernel-side HPT allocated */ 1501c5f54f3eSDavid Gibson if (rc != shift) { 1502c5f54f3eSDavid Gibson error_setg(errp, 1503c5f54f3eSDavid Gibson "Requested order %d HPT, but kernel allocated order %ld (try smaller maxmem?)", 1504c5f54f3eSDavid Gibson shift, rc); 15057735fedaSBharata B Rao } 15067735fedaSBharata B Rao 150753018216SPaolo Bonzini spapr->htab_shift = shift; 1508c18ad9a5SDavid Gibson spapr->htab = NULL; 1509b817772aSBharata B Rao } else { 1510c5f54f3eSDavid Gibson /* kernel-side HPT not needed, allocate in userspace instead */ 1511c5f54f3eSDavid Gibson size_t size = 1ULL << shift; 1512c5f54f3eSDavid Gibson int i; 151301a57972SSamuel Mendoza-Jonas 1514c5f54f3eSDavid Gibson spapr->htab = qemu_memalign(size, size); 1515c5f54f3eSDavid Gibson if (!spapr->htab) { 1516c5f54f3eSDavid Gibson error_setg_errno(errp, errno, 1517c5f54f3eSDavid Gibson "Could not allocate HPT of order %d", shift); 1518c5f54f3eSDavid Gibson return; 1519b817772aSBharata B Rao } 1520b817772aSBharata B Rao 1521c5f54f3eSDavid Gibson memset(spapr->htab, 0, size); 1522c5f54f3eSDavid Gibson spapr->htab_shift = shift; 1523b817772aSBharata B Rao 1524c5f54f3eSDavid Gibson for (i = 0; i < size / HASH_PTE_SIZE_64; i++) { 1525c5f54f3eSDavid Gibson DIRTY_HPTE(HPTE(spapr->htab, i)); 15267735fedaSBharata B Rao } 152753018216SPaolo Bonzini } 1528ee4d9eccSSuraj Jitindar Singh /* We're setting up a hash table, so that means we're not radix */ 1529176dcceeSSuraj Jitindar Singh spapr->patb_entry = 0; 153000fd075eSBenjamin Herrenschmidt spapr_set_all_lpcrs(0, LPCR_HR | LPCR_UPRT); 153153018216SPaolo Bonzini } 153253018216SPaolo Bonzini 15338897ea5aSDavid Gibson void spapr_setup_hpt(SpaprMachineState *spapr) 1534b4db5413SSuraj Jitindar Singh { 15352772cf6bSDavid Gibson int hpt_shift; 15362772cf6bSDavid Gibson 1537087820e3SGreg Kurz if (spapr->resize_hpt == SPAPR_RESIZE_HPT_DISABLED) { 15382772cf6bSDavid Gibson hpt_shift = 
spapr_hpt_shift_for_ramsize(MACHINE(spapr)->maxram_size); 15392772cf6bSDavid Gibson } else { 1540768a20f3SDavid Gibson uint64_t current_ram_size; 1541768a20f3SDavid Gibson 1542768a20f3SDavid Gibson current_ram_size = MACHINE(spapr)->ram_size + get_plugged_memory_size(); 1543768a20f3SDavid Gibson hpt_shift = spapr_hpt_shift_for_ramsize(current_ram_size); 15442772cf6bSDavid Gibson } 15452772cf6bSDavid Gibson spapr_reallocate_hpt(spapr, hpt_shift, &error_fatal); 15462772cf6bSDavid Gibson 15478897ea5aSDavid Gibson if (kvm_enabled()) { 15486a84737cSDavid Gibson hwaddr vrma_limit = kvmppc_vrma_limit(spapr->htab_shift); 15496a84737cSDavid Gibson 15508897ea5aSDavid Gibson /* Check our RMA fits in the possible VRMA */ 15518897ea5aSDavid Gibson if (vrma_limit < spapr->rma_size) { 15528897ea5aSDavid Gibson error_report("Unable to create %" HWADDR_PRIu 15538897ea5aSDavid Gibson "MiB RMA (VRMA only allows %" HWADDR_PRIu "MiB", 15548897ea5aSDavid Gibson spapr->rma_size / MiB, vrma_limit / MiB); 15558897ea5aSDavid Gibson exit(EXIT_FAILURE); 15568897ea5aSDavid Gibson } 1557b4db5413SSuraj Jitindar Singh } 1558b4db5413SSuraj Jitindar Singh } 1559b4db5413SSuraj Jitindar Singh 156082512483SGreg Kurz static int spapr_reset_drcs(Object *child, void *opaque) 156182512483SGreg Kurz { 1562ce2918cbSDavid Gibson SpaprDrc *drc = 1563ce2918cbSDavid Gibson (SpaprDrc *) object_dynamic_cast(child, 156482512483SGreg Kurz TYPE_SPAPR_DR_CONNECTOR); 156582512483SGreg Kurz 156682512483SGreg Kurz if (drc) { 156782512483SGreg Kurz spapr_drc_reset(drc); 156882512483SGreg Kurz } 156982512483SGreg Kurz 157082512483SGreg Kurz return 0; 157182512483SGreg Kurz } 157282512483SGreg Kurz 1573a0628599SLike Xu static void spapr_machine_reset(MachineState *machine) 157453018216SPaolo Bonzini { 1575ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(machine); 1576182735efSAndreas Färber PowerPCCPU *first_ppc_cpu; 1577744a928cSAlexey Kardashevskiy hwaddr fdt_addr; 1578997b6cfcSDavid Gibson void *fdt; 
1579997b6cfcSDavid Gibson int rc; 1580259186a7SAndreas Färber 1581905db916SBharata B Rao kvmppc_svm_off(&error_fatal); 15829f6edd06SDavid Gibson spapr_caps_apply(spapr); 158333face6bSDavid Gibson 15841481fe5fSLaurent Vivier first_ppc_cpu = POWERPC_CPU(first_cpu); 15851481fe5fSLaurent Vivier if (kvm_enabled() && kvmppc_has_cap_mmu_radix() && 1586ad99d04cSDavid Gibson ppc_type_check_compat(machine->cpu_type, CPU_POWERPC_LOGICAL_3_00, 0, 15871481fe5fSLaurent Vivier spapr->max_compat_pvr)) { 158879825f4dSBenjamin Herrenschmidt /* 158979825f4dSBenjamin Herrenschmidt * If using KVM with radix mode available, VCPUs can be started 1590b4db5413SSuraj Jitindar Singh * without a HPT because KVM will start them in radix mode. 159179825f4dSBenjamin Herrenschmidt * Set the GR bit in PATE so that we know there is no HPT. 159279825f4dSBenjamin Herrenschmidt */ 159379825f4dSBenjamin Herrenschmidt spapr->patb_entry = PATE1_GR; 159400fd075eSBenjamin Herrenschmidt spapr_set_all_lpcrs(LPCR_HR | LPCR_UPRT, LPCR_HR | LPCR_UPRT); 1595b4db5413SSuraj Jitindar Singh } else { 15968897ea5aSDavid Gibson spapr_setup_hpt(spapr); 1597c5f54f3eSDavid Gibson } 159853018216SPaolo Bonzini 159925c9780dSDavid Gibson qemu_devices_reset(); 160025c9780dSDavid Gibson 16019012a53fSGreg Kurz spapr_ovec_cleanup(spapr->ov5_cas); 16029012a53fSGreg Kurz spapr->ov5_cas = spapr_ovec_new(); 16039012a53fSGreg Kurz 1604ce03a193SLaurent Vivier ppc_set_compat_all(spapr->max_compat_pvr, &error_fatal); 16059012a53fSGreg Kurz 1606ec132efaSAlexey Kardashevskiy /* 1607b2e22477SCédric Le Goater * This is fixing some of the default configuration of the XIVE 1608b2e22477SCédric Le Goater * devices. To be called after the reset of the machine devices. 1609b2e22477SCédric Le Goater */ 1610b2e22477SCédric Le Goater spapr_irq_reset(spapr, &error_fatal); 1611b2e22477SCédric Le Goater 161223ff81bdSGreg Kurz /* 161323ff81bdSGreg Kurz * There is no CAS under qtest. 
Simulate one to please the code that 161423ff81bdSGreg Kurz * depends on spapr->ov5_cas. This is especially needed to test device 161523ff81bdSGreg Kurz * unplug, so we do that before resetting the DRCs. 161623ff81bdSGreg Kurz */ 161723ff81bdSGreg Kurz if (qtest_enabled()) { 161823ff81bdSGreg Kurz spapr_ovec_cleanup(spapr->ov5_cas); 161923ff81bdSGreg Kurz spapr->ov5_cas = spapr_ovec_clone(spapr->ov5); 162023ff81bdSGreg Kurz } 162123ff81bdSGreg Kurz 162282512483SGreg Kurz /* DRC reset may cause a device to be unplugged. This will cause troubles 162382512483SGreg Kurz * if this device is used by another device (eg, a running vhost backend 162482512483SGreg Kurz * will crash QEMU if the DIMM holding the vring goes away). To avoid such 162582512483SGreg Kurz * situations, we reset DRCs after all devices have been reset. 162682512483SGreg Kurz */ 162782512483SGreg Kurz object_child_foreach_recursive(object_get_root(), spapr_reset_drcs, NULL); 162882512483SGreg Kurz 162956258174SDaniel Henrique Barboza spapr_clear_pending_events(spapr); 163053018216SPaolo Bonzini 1631b7d1f77aSBenjamin Herrenschmidt /* 1632b7d1f77aSBenjamin Herrenschmidt * We place the device tree and RTAS just below either the top of the RMA, 1633df269271SAlexey Kardashevskiy * or just below 2GB, whichever is lower, so that it can be 1634b7d1f77aSBenjamin Herrenschmidt * processed with 32-bit real mode code if necessary 1635b7d1f77aSBenjamin Herrenschmidt */ 1636744a928cSAlexey Kardashevskiy fdt_addr = MIN(spapr->rma_size, RTAS_MAX_ADDR) - FDT_MAX_SIZE; 1637b7d1f77aSBenjamin Herrenschmidt 163897b32a6aSDavid Gibson fdt = spapr_build_fdt(spapr, true, FDT_MAX_SIZE); 163953018216SPaolo Bonzini 1640997b6cfcSDavid Gibson rc = fdt_pack(fdt); 1641997b6cfcSDavid Gibson 1642997b6cfcSDavid Gibson /* Should only fail if we've built a corrupted tree */ 1643997b6cfcSDavid Gibson assert(rc == 0); 1644997b6cfcSDavid Gibson 1645997b6cfcSDavid Gibson /* Load the fdt */ 1646997b6cfcSDavid Gibson qemu_fdt_dumpdtb(fdt, 
fdt_totalsize(fdt)); 1647cae172abSDavid Gibson cpu_physical_memory_write(fdt_addr, fdt, fdt_totalsize(fdt)); 1648fea35ca4SAlexey Kardashevskiy g_free(spapr->fdt_blob); 1649fea35ca4SAlexey Kardashevskiy spapr->fdt_size = fdt_totalsize(fdt); 1650fea35ca4SAlexey Kardashevskiy spapr->fdt_initial_size = spapr->fdt_size; 1651fea35ca4SAlexey Kardashevskiy spapr->fdt_blob = fdt; 1652997b6cfcSDavid Gibson 165353018216SPaolo Bonzini /* Set up the entry state */ 1654395a20d3SAlexey Kardashevskiy spapr_cpu_set_entry_state(first_ppc_cpu, SPAPR_ENTRY_POINT, 0, fdt_addr, 0); 1655182735efSAndreas Färber first_ppc_cpu->env.gpr[5] = 0; 165653018216SPaolo Bonzini 1657edfdbf9cSNicholas Piggin spapr->fwnmi_system_reset_addr = -1; 16588af7e1feSNicholas Piggin spapr->fwnmi_machine_check_addr = -1; 16598af7e1feSNicholas Piggin spapr->fwnmi_machine_check_interlock = -1; 16609ac703acSAravinda Prasad 16619ac703acSAravinda Prasad /* Signal all vCPUs waiting on this condition */ 16628af7e1feSNicholas Piggin qemu_cond_broadcast(&spapr->fwnmi_machine_check_interlock_cond); 16632500fb42SAravinda Prasad 16642500fb42SAravinda Prasad migrate_del_blocker(spapr->fwnmi_migration_blocker); 166553018216SPaolo Bonzini } 166653018216SPaolo Bonzini 1667ce2918cbSDavid Gibson static void spapr_create_nvram(SpaprMachineState *spapr) 166853018216SPaolo Bonzini { 16693e80f690SMarkus Armbruster DeviceState *dev = qdev_new("spapr-nvram"); 16703978b863SPaolo Bonzini DriveInfo *dinfo = drive_get(IF_PFLASH, 0, 0); 167153018216SPaolo Bonzini 16723978b863SPaolo Bonzini if (dinfo) { 1673934df912SMarkus Armbruster qdev_prop_set_drive_err(dev, "drive", blk_by_legacy_dinfo(dinfo), 16746231a6daSMarkus Armbruster &error_fatal); 167553018216SPaolo Bonzini } 167653018216SPaolo Bonzini 16773e80f690SMarkus Armbruster qdev_realize_and_unref(dev, &spapr->vio_bus->bus, &error_fatal); 167853018216SPaolo Bonzini 1679ce2918cbSDavid Gibson spapr->nvram = (struct SpaprNvram *)dev; 168053018216SPaolo Bonzini } 168153018216SPaolo Bonzini 
1682ce2918cbSDavid Gibson static void spapr_rtc_create(SpaprMachineState *spapr) 168328df36a1SDavid Gibson { 16849fc7fc4dSMarkus Armbruster object_initialize_child_with_props(OBJECT(spapr), "rtc", &spapr->rtc, 16859fc7fc4dSMarkus Armbruster sizeof(spapr->rtc), TYPE_SPAPR_RTC, 1686f6d4dca8SThomas Huth &error_fatal, NULL); 1687ce189ab2SMarkus Armbruster qdev_realize(DEVICE(&spapr->rtc), NULL, &error_fatal); 1688147ff807SCédric Le Goater object_property_add_alias(OBJECT(spapr), "rtc-time", OBJECT(&spapr->rtc), 1689d2623129SMarkus Armbruster "date"); 169028df36a1SDavid Gibson } 169128df36a1SDavid Gibson 169253018216SPaolo Bonzini /* Returns whether we want to use VGA or not */ 169314c6a894SDavid Gibson static bool spapr_vga_init(PCIBus *pci_bus, Error **errp) 169453018216SPaolo Bonzini { 169553018216SPaolo Bonzini switch (vga_interface_type) { 169653018216SPaolo Bonzini case VGA_NONE: 16977effdaa3SMark Wu return false; 16987effdaa3SMark Wu case VGA_DEVICE: 16997effdaa3SMark Wu return true; 170053018216SPaolo Bonzini case VGA_STD: 1701b798c190SBenjamin Herrenschmidt case VGA_VIRTIO: 17026e66d0c6SThomas Huth case VGA_CIRRUS: 170353018216SPaolo Bonzini return pci_vga_init(pci_bus) != NULL; 170453018216SPaolo Bonzini default: 170514c6a894SDavid Gibson error_setg(errp, 170614c6a894SDavid Gibson "Unsupported VGA mode, only -vga std or -vga virtio is supported"); 170714c6a894SDavid Gibson return false; 170853018216SPaolo Bonzini } 170953018216SPaolo Bonzini } 171053018216SPaolo Bonzini 17114e5fe368SSuraj Jitindar Singh static int spapr_pre_load(void *opaque) 17124e5fe368SSuraj Jitindar Singh { 17134e5fe368SSuraj Jitindar Singh int rc; 17144e5fe368SSuraj Jitindar Singh 17154e5fe368SSuraj Jitindar Singh rc = spapr_caps_pre_load(opaque); 17164e5fe368SSuraj Jitindar Singh if (rc) { 17174e5fe368SSuraj Jitindar Singh return rc; 17184e5fe368SSuraj Jitindar Singh } 17194e5fe368SSuraj Jitindar Singh 17204e5fe368SSuraj Jitindar Singh return 0; 17214e5fe368SSuraj Jitindar Singh } 
17224e5fe368SSuraj Jitindar Singh 1723880ae7deSDavid Gibson static int spapr_post_load(void *opaque, int version_id) 1724880ae7deSDavid Gibson { 1725ce2918cbSDavid Gibson SpaprMachineState *spapr = (SpaprMachineState *)opaque; 1726880ae7deSDavid Gibson int err = 0; 1727880ae7deSDavid Gibson 1728be85537dSDavid Gibson err = spapr_caps_post_migration(spapr); 1729be85537dSDavid Gibson if (err) { 1730be85537dSDavid Gibson return err; 1731be85537dSDavid Gibson } 1732be85537dSDavid Gibson 1733e502202cSCédric Le Goater /* 1734e502202cSCédric Le Goater * In earlier versions, there was no separate qdev for the PAPR 1735880ae7deSDavid Gibson * RTC, so the RTC offset was stored directly in sPAPREnvironment. 1736880ae7deSDavid Gibson * So when migrating from those versions, poke the incoming offset 1737e502202cSCédric Le Goater * value into the RTC device 1738e502202cSCédric Le Goater */ 1739880ae7deSDavid Gibson if (version_id < 3) { 1740147ff807SCédric Le Goater err = spapr_rtc_import_offset(&spapr->rtc, spapr->rtc_offset); 1741e502202cSCédric Le Goater if (err) { 1742e502202cSCédric Le Goater return err; 1743e502202cSCédric Le Goater } 1744880ae7deSDavid Gibson } 1745880ae7deSDavid Gibson 17460c86b2dfSLaurent Vivier if (kvm_enabled() && spapr->patb_entry) { 1747d39c90f5SBharata B Rao PowerPCCPU *cpu = POWERPC_CPU(first_cpu); 174879825f4dSBenjamin Herrenschmidt bool radix = !!(spapr->patb_entry & PATE1_GR); 1749d39c90f5SBharata B Rao bool gtse = !!(cpu->env.spr[SPR_LPCR] & LPCR_GTSE); 1750d39c90f5SBharata B Rao 175100fd075eSBenjamin Herrenschmidt /* 175200fd075eSBenjamin Herrenschmidt * Update LPCR:HR and UPRT as they may not be set properly in 175300fd075eSBenjamin Herrenschmidt * the stream 175400fd075eSBenjamin Herrenschmidt */ 175500fd075eSBenjamin Herrenschmidt spapr_set_all_lpcrs(radix ? 
(LPCR_HR | LPCR_UPRT) : 0, 175600fd075eSBenjamin Herrenschmidt LPCR_HR | LPCR_UPRT); 175700fd075eSBenjamin Herrenschmidt 1758d39c90f5SBharata B Rao err = kvmppc_configure_v3_mmu(cpu, radix, gtse, spapr->patb_entry); 1759d39c90f5SBharata B Rao if (err) { 1760d39c90f5SBharata B Rao error_report("Process table config unsupported by the host"); 1761d39c90f5SBharata B Rao return -EINVAL; 1762d39c90f5SBharata B Rao } 1763d39c90f5SBharata B Rao } 1764d39c90f5SBharata B Rao 17651c53b06cSCédric Le Goater err = spapr_irq_post_load(spapr, version_id); 17661c53b06cSCédric Le Goater if (err) { 17671c53b06cSCédric Le Goater return err; 17681c53b06cSCédric Le Goater } 17691c53b06cSCédric Le Goater 1770880ae7deSDavid Gibson return err; 1771880ae7deSDavid Gibson } 1772880ae7deSDavid Gibson 17734e5fe368SSuraj Jitindar Singh static int spapr_pre_save(void *opaque) 17744e5fe368SSuraj Jitindar Singh { 17754e5fe368SSuraj Jitindar Singh int rc; 17764e5fe368SSuraj Jitindar Singh 17774e5fe368SSuraj Jitindar Singh rc = spapr_caps_pre_save(opaque); 17784e5fe368SSuraj Jitindar Singh if (rc) { 17794e5fe368SSuraj Jitindar Singh return rc; 17804e5fe368SSuraj Jitindar Singh } 17814e5fe368SSuraj Jitindar Singh 17824e5fe368SSuraj Jitindar Singh return 0; 17834e5fe368SSuraj Jitindar Singh } 17844e5fe368SSuraj Jitindar Singh 1785880ae7deSDavid Gibson static bool version_before_3(void *opaque, int version_id) 1786880ae7deSDavid Gibson { 1787880ae7deSDavid Gibson return version_id < 3; 1788880ae7deSDavid Gibson } 1789880ae7deSDavid Gibson 1790fd38804bSDaniel Henrique Barboza static bool spapr_pending_events_needed(void *opaque) 1791fd38804bSDaniel Henrique Barboza { 1792ce2918cbSDavid Gibson SpaprMachineState *spapr = (SpaprMachineState *)opaque; 1793fd38804bSDaniel Henrique Barboza return !QTAILQ_EMPTY(&spapr->pending_events); 1794fd38804bSDaniel Henrique Barboza } 1795fd38804bSDaniel Henrique Barboza 1796fd38804bSDaniel Henrique Barboza static const VMStateDescription vmstate_spapr_event_entry = { 
1797fd38804bSDaniel Henrique Barboza .name = "spapr_event_log_entry", 1798fd38804bSDaniel Henrique Barboza .version_id = 1, 1799fd38804bSDaniel Henrique Barboza .minimum_version_id = 1, 1800fd38804bSDaniel Henrique Barboza .fields = (VMStateField[]) { 1801ce2918cbSDavid Gibson VMSTATE_UINT32(summary, SpaprEventLogEntry), 1802ce2918cbSDavid Gibson VMSTATE_UINT32(extended_length, SpaprEventLogEntry), 1803ce2918cbSDavid Gibson VMSTATE_VBUFFER_ALLOC_UINT32(extended_log, SpaprEventLogEntry, 0, 18045341258eSDavid Gibson NULL, extended_length), 1805fd38804bSDaniel Henrique Barboza VMSTATE_END_OF_LIST() 1806fd38804bSDaniel Henrique Barboza }, 1807fd38804bSDaniel Henrique Barboza }; 1808fd38804bSDaniel Henrique Barboza 1809fd38804bSDaniel Henrique Barboza static const VMStateDescription vmstate_spapr_pending_events = { 1810fd38804bSDaniel Henrique Barboza .name = "spapr_pending_events", 1811fd38804bSDaniel Henrique Barboza .version_id = 1, 1812fd38804bSDaniel Henrique Barboza .minimum_version_id = 1, 1813fd38804bSDaniel Henrique Barboza .needed = spapr_pending_events_needed, 1814fd38804bSDaniel Henrique Barboza .fields = (VMStateField[]) { 1815ce2918cbSDavid Gibson VMSTATE_QTAILQ_V(pending_events, SpaprMachineState, 1, 1816ce2918cbSDavid Gibson vmstate_spapr_event_entry, SpaprEventLogEntry, next), 1817fd38804bSDaniel Henrique Barboza VMSTATE_END_OF_LIST() 1818fd38804bSDaniel Henrique Barboza }, 1819fd38804bSDaniel Henrique Barboza }; 1820fd38804bSDaniel Henrique Barboza 182162ef3760SMichael Roth static bool spapr_ov5_cas_needed(void *opaque) 182262ef3760SMichael Roth { 1823ce2918cbSDavid Gibson SpaprMachineState *spapr = opaque; 1824ce2918cbSDavid Gibson SpaprOptionVector *ov5_mask = spapr_ovec_new(); 182562ef3760SMichael Roth bool cas_needed; 182662ef3760SMichael Roth 1827ce2918cbSDavid Gibson /* Prior to the introduction of SpaprOptionVector, we had two option 182862ef3760SMichael Roth * vectors we dealt with: OV5_FORM1_AFFINITY, and OV5_DRCONF_MEMORY. 
     * Both of these options encode machine topology into the device-tree
     * in such a way that the now-booted OS should still be able to interact
     * appropriately with QEMU regardless of what options were actually
     * negotiated on the source side.
     *
     * As such, we can avoid migrating the CAS-negotiated options if these
     * are the only options available on the current machine/platform.
     * Since these are the only options available for pseries-2.7 and
     * earlier, this allows us to maintain old->new/new->old migration
     * compatibility.
     *
     * For QEMU 2.8+, there are additional CAS-negotiatable options available
     * via default pseries-2.8 machines and explicit command-line parameters.
     * Some of these options, like OV5_HP_EVT, *do* require QEMU to be aware
     * of the actual CAS-negotiated values to continue working properly. For
     * example, availability of memory unplug depends on knowing whether
     * OV5_HP_EVT was negotiated via CAS.
     *
     * Thus, for any cases where the set of available CAS-negotiatable
     * options extends beyond OV5_FORM1_AFFINITY and OV5_DRCONF_MEMORY, we
     * include the CAS-negotiated options in the migration stream, unless
     * they affect boot time behaviour only.
     */
    spapr_ovec_set(ov5_mask, OV5_FORM1_AFFINITY);
    spapr_ovec_set(ov5_mask, OV5_DRCONF_MEMORY);
    spapr_ovec_set(ov5_mask, OV5_DRMEM_V2);

    /* We need extra information if we have any bits outside the mask
     * defined above */
    cas_needed = !spapr_ovec_subset(spapr->ov5, ov5_mask);

    spapr_ovec_cleanup(ov5_mask);

    return cas_needed;
}

/* Optional subsection: CAS-negotiated option vector 5 */
static const VMStateDescription vmstate_spapr_ov5_cas = {
    .name = "spapr_option_vector_ov5_cas",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = spapr_ov5_cas_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_POINTER_V(ov5_cas, SpaprMachineState, 1,
                                 vmstate_spapr_ovec, SpaprOptionVector),
        VMSTATE_END_OF_LIST()
    },
};

/* Subsection needed only once a partition table entry has been set */
static bool spapr_patb_entry_needed(void *opaque)
{
    SpaprMachineState *spapr = opaque;

    return !!spapr->patb_entry;
}

/* Optional subsection: the guest's partition table entry */
static const VMStateDescription vmstate_spapr_patb_entry = {
    .name = "spapr_patb_entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed =
spapr_patb_entry_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(patb_entry, SpaprMachineState),
        VMSTATE_END_OF_LIST()
    },
};

/* Subsection needed only when at least one IRQ is allocated in the map */
static bool spapr_irq_map_needed(void *opaque)
{
    SpaprMachineState *spapr = opaque;

    return spapr->irq_map && !bitmap_empty(spapr->irq_map, spapr->irq_map_nr);
}

/* Optional subsection: bitmap of allocated interrupt numbers */
static const VMStateDescription vmstate_spapr_irq_map = {
    .name = "spapr_irq_map",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = spapr_irq_map_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BITMAP(irq_map, SpaprMachineState, 0, irq_map_nr),
        VMSTATE_END_OF_LIST()
    },
};

/* Migrate the device tree blob only for machine classes that allow
 * updating it at CAS time */
static bool spapr_dtb_needed(void *opaque)
{
    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(opaque);

    return smc->update_dt_enabled;
}

/* Drop any locally built FDT blob so the incoming one replaces it cleanly */
static int spapr_dtb_pre_load(void *opaque)
{
    SpaprMachineState *spapr = (SpaprMachineState *)opaque;

    g_free(spapr->fdt_blob);
    spapr->fdt_blob =
NULL;
    spapr->fdt_size = 0;

    return 0;
}

/* Optional subsection: flattened device tree blob and sizes */
static const VMStateDescription vmstate_spapr_dtb = {
    .name = "spapr_dtb",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = spapr_dtb_needed,
    .pre_load = spapr_dtb_pre_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(fdt_initial_size, SpaprMachineState),
        VMSTATE_UINT32(fdt_size, SpaprMachineState),
        VMSTATE_VBUFFER_ALLOC_UINT32(fdt_blob, SpaprMachineState, 0, NULL,
                                     fdt_size),
        VMSTATE_END_OF_LIST()
    },
};

/* FWNMI state migrates only once a machine-check handler address has been
 * registered (-1 appears to be the "unset" sentinel here) */
static bool spapr_fwnmi_needed(void *opaque)
{
    SpaprMachineState *spapr = (SpaprMachineState *)opaque;

    return spapr->fwnmi_machine_check_addr != -1;
}

static int spapr_fwnmi_pre_save(void *opaque)
{
    SpaprMachineState *spapr = (SpaprMachineState *)opaque;

    /*
     * Check if machine check handling is in progress and print a
     * warning message.
     */
    if (spapr->fwnmi_machine_check_interlock != -1) {
        /* NOTE(review): the adjacent string literals below concatenate
         * without a space: "...migration. Thehandler may run...".  A space
         * is missing at the end of the first fragment; fixing it changes a
         * user-visible message, so it is only flagged here. */
        warn_report("A machine check is being handled during migration. The"
                "handler may run and log hardware error on the destination");
    }

    return 0;
}

/* Optional subsection: firmware-assisted NMI (FWNMI) registration state */
static const VMStateDescription vmstate_spapr_fwnmi = {
    .name = "spapr_fwnmi",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = spapr_fwnmi_needed,
    .pre_save = spapr_fwnmi_pre_save,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(fwnmi_system_reset_addr, SpaprMachineState),
        VMSTATE_UINT64(fwnmi_machine_check_addr, SpaprMachineState),
        VMSTATE_INT32(fwnmi_machine_check_interlock, SpaprMachineState),
        VMSTATE_END_OF_LIST()
    },
};

/* Top-level machine VMSD; optional state hangs off .subsections below */
static const VMStateDescription vmstate_spapr = {
    .name = "spapr",
    .version_id = 3,
    .minimum_version_id = 1,
    .pre_load = spapr_pre_load,
    .post_load = spapr_post_load,
    .pre_save = spapr_pre_save,
    .fields = (VMStateField[]) {
        /* used to be @next_irq */
        VMSTATE_UNUSED_BUFFER(version_before_3, 0, 4),

        /* RTC offset */
        VMSTATE_UINT64_TEST(rtc_offset,
SpaprMachineState, version_before_3),

        VMSTATE_PPC_TIMEBASE_V(tb, SpaprMachineState, 2),
        VMSTATE_END_OF_LIST()
    },
    /* Each subsection is self-describing and only sent when its .needed
     * callback returns true */
    .subsections = (const VMStateDescription*[]) {
        &vmstate_spapr_ov5_cas,
        &vmstate_spapr_patb_entry,
        &vmstate_spapr_pending_events,
        &vmstate_spapr_cap_htm,
        &vmstate_spapr_cap_vsx,
        &vmstate_spapr_cap_dfp,
        &vmstate_spapr_cap_cfpc,
        &vmstate_spapr_cap_sbbc,
        &vmstate_spapr_cap_ibs,
        &vmstate_spapr_cap_hpt_maxpagesize,
        &vmstate_spapr_irq_map,
        &vmstate_spapr_cap_nested_kvm_hv,
        &vmstate_spapr_dtb,
        &vmstate_spapr_cap_large_decr,
        &vmstate_spapr_cap_ccf_assist,
        &vmstate_spapr_cap_fwnmi,
        &vmstate_spapr_fwnmi,
        NULL
    }
};

/*
 * Iterative-migration setup for the hash page table (HPT).
 * Writes the "iteration" header: -1 when no HPT exists (radix guest),
 * otherwise the HPT shift.  When QEMU itself holds the HPT, reset the
 * save cursor; otherwise KVM owns the table and must be enabled.
 */
static int htab_save_setup(QEMUFile *f, void *opaque)
{
    SpaprMachineState *spapr = opaque;

    /* "Iteration" header */
    if (!spapr->htab_shift) {
        qemu_put_be32(f, -1);
    } else {
        qemu_put_be32(f, spapr->htab_shift);
    }

    if (spapr->htab) {
spapr->htab_save_index = 0;
        spapr->htab_first_pass = true;
    } else {
        if (spapr->htab_shift) {
            /* HPT exists but is held by the kernel: KVM must be in use */
            assert(kvm_enabled());
        }
    }


    return 0;
}

/* Emit one chunk of HPTEs: start index, counts, then the valid entries'
 * raw bytes (invalid entries are implied zero on the destination) */
static void htab_save_chunk(QEMUFile *f, SpaprMachineState *spapr,
                            int chunkstart, int n_valid, int n_invalid)
{
    qemu_put_be32(f, chunkstart);
    qemu_put_be16(f, n_valid);
    qemu_put_be16(f, n_invalid);
    qemu_put_buffer(f, HPTE(spapr->htab, chunkstart),
                    HASH_PTE_SIZE_64 * n_valid);
}

/* End-of-stream marker: an all-zero chunk header */
static void htab_save_end_marker(QEMUFile *f)
{
    qemu_put_be32(f, 0);
    qemu_put_be16(f, 0);
    qemu_put_be16(f, 0);
}

/*
 * First pass over the HPT: send every currently-valid entry, clearing
 * dirty bits as we go.  max_ns == -1 means no time budget; otherwise
 * stop once the budget or the file rate limit is exceeded and remember
 * the cursor in spapr->htab_save_index.
 */
static void htab_save_first_pass(QEMUFile *f, SpaprMachineState *spapr,
                                 int64_t max_ns)
{
    bool has_timeout = max_ns != -1;
    int htabslots = HTAB_SIZE(spapr) / HASH_PTE_SIZE_64;
    int index = spapr->htab_save_index;
    int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    assert(spapr->htab_first_pass);

    do {
        int chunkstart;

        /* Consume invalid HPTEs */
while ((index < htabslots)
               && !HPTE_VALID(HPTE(spapr->htab, index))) {
            CLEAN_HPTE(HPTE(spapr->htab, index));
            index++;
        }

        /* Consume valid HPTEs */
        chunkstart = index;
        /* USHRT_MAX bound: the chunk header stores counts as 16 bits */
        while ((index < htabslots) && (index - chunkstart < USHRT_MAX)
               && HPTE_VALID(HPTE(spapr->htab, index))) {
            CLEAN_HPTE(HPTE(spapr->htab, index));
            index++;
        }

        if (index > chunkstart) {
            int n_valid = index - chunkstart;

            htab_save_chunk(f, spapr, chunkstart, n_valid, 0);

            if (has_timeout &&
                (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) > max_ns) {
                break;
            }
        }
    } while ((index < htabslots) && !qemu_file_rate_limit(f));

    if (index >= htabslots) {
        /* Whole table walked once: subsequent iterations are dirty-only */
        assert(index == htabslots);
        index = 0;
        spapr->htab_first_pass = false;
    }
    spapr->htab_save_index = index;
}

/*
 * Later passes: resend only entries dirtied since the previous pass.
 * max_ns < 0 means "final pass, ignore budget/rate limit and drain".
 * Returns 1 when the whole table was examined and nothing needed
 * sending (i.e. the HPT has converged), 0 otherwise.
 */
static int htab_save_later_pass(QEMUFile *f, SpaprMachineState *spapr,
                                int64_t max_ns)
{
    bool final = max_ns < 0;
    int htabslots = HTAB_SIZE(spapr) / HASH_PTE_SIZE_64;
    int examined = 0, sent = 0;
    int index =
spapr->htab_save_index;
    int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    assert(!spapr->htab_first_pass);

    do {
        int chunkstart, invalidstart;

        /* Consume non-dirty HPTEs */
        while ((index < htabslots)
               && !HPTE_DIRTY(HPTE(spapr->htab, index))) {
            index++;
            examined++;
        }

        chunkstart = index;
        /* Consume valid dirty HPTEs (16-bit chunk-count limit applies) */
        while ((index < htabslots) && (index - chunkstart < USHRT_MAX)
               && HPTE_DIRTY(HPTE(spapr->htab, index))
               && HPTE_VALID(HPTE(spapr->htab, index))) {
            CLEAN_HPTE(HPTE(spapr->htab, index));
            index++;
            examined++;
        }

        invalidstart = index;
        /* Consume invalid dirty HPTEs */
        while ((index < htabslots) && (index - invalidstart < USHRT_MAX)
               && HPTE_DIRTY(HPTE(spapr->htab, index))
               && !HPTE_VALID(HPTE(spapr->htab, index))) {
            CLEAN_HPTE(HPTE(spapr->htab, index));
            index++;
            examined++;
        }

        if (index > chunkstart) {
            /* One chunk = a run of valid entries followed by a run of
             * invalid ones, both starting at chunkstart */
            int n_valid = invalidstart - chunkstart;
            int n_invalid = index - invalidstart;

            htab_save_chunk(f, spapr, chunkstart, n_valid, n_invalid);
            sent += index - chunkstart;

            /* Honour the time budget except on the final (draining) pass */
            if (!final && (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) > max_ns) {
                break;
            }
        }

        if (examined >= htabslots) {
            break;
        }

        if (index >= htabslots) {
            /* Wrap around: later passes may start mid-table */
            assert(index == htabslots);
            index = 0;
        }
    } while ((examined < htabslots) && (!qemu_file_rate_limit(f) || final));

    if (index >= htabslots) {
        assert(index == htabslots);
        index = 0;
    }

    spapr->htab_save_index = index;

    /* 1 = converged (full table examined, nothing sent), 0 = more to do */
    return (examined >= htabslots) && (sent == 0) ?
1 : 0;
}

#define MAX_ITERATION_NS    5000000 /* 5 ms */
#define MAX_KVM_BUF_SIZE    2048

/*
 * One iterative-migration step for the HPT.  Returns 1 when this
 * section is finished (no HPT at all, or later-pass converged), 0 to
 * be called again, negative errno on failure.  The KVM-held case
 * streams via the kernel's HPT fd instead of QEMU's copy.
 */
static int htab_save_iterate(QEMUFile *f, void *opaque)
{
    SpaprMachineState *spapr = opaque;
    int fd;
    int rc = 0;

    /* Iteration header */
    if (!spapr->htab_shift) {
        qemu_put_be32(f, -1);
        return 1;
    } else {
        qemu_put_be32(f, 0);
    }

    if (!spapr->htab) {
        assert(kvm_enabled());

        fd = get_htab_fd(spapr);
        if (fd < 0) {
            return fd;
        }

        rc = kvmppc_save_htab(f, fd, MAX_KVM_BUF_SIZE, MAX_ITERATION_NS);
        if (rc < 0) {
            return rc;
        }
    } else if (spapr->htab_first_pass) {
        htab_save_first_pass(f, spapr, MAX_ITERATION_NS);
    } else {
        rc = htab_save_later_pass(f, spapr, MAX_ITERATION_NS);
    }

    htab_save_end_marker(f);

    return rc;
}

/* Final (blocking) HPT save: drain everything with no time budget */
static int htab_save_complete(QEMUFile *f, void *opaque)
{
    SpaprMachineState *spapr = opaque;
    int fd;

    /* Iteration header */
    if (!spapr->htab_shift) {
        qemu_put_be32(f, -1);
        return 0;
    } else {
        qemu_put_be32(f, 0);
    }

    if (!spapr->htab) {
        int rc;

        assert(kvm_enabled());

        fd = get_htab_fd(spapr);
        if (fd < 0) {
            return fd;
        }

        /* -1 = no time budget: drain the kernel's HPT completely */
        rc = kvmppc_save_htab(f, fd, MAX_KVM_BUF_SIZE, -1);
        if (rc < 0) {
            return rc;
        }
    } else {
        /* Finish the first pass if it was interrupted, then drain dirty */
        if (spapr->htab_first_pass) {
            htab_save_first_pass(f, spapr, -1);
        }
        htab_save_later_pass(f, spapr, -1);
    }

    /* End marker */
    htab_save_end_marker(f);

    return 0;
}

/*
 * Load side of the HPT stream.  The first non-zero section header
 * carries the HPT shift; subsequent records are (index, n_valid,
 * n_invalid) chunks until an all-zero end marker.
 */
static int htab_load(QEMUFile *f, void *opaque, int version_id)
{
    SpaprMachineState *spapr = opaque;
    uint32_t section_hdr;
    int fd = -1;
    Error *local_err = NULL;

    if
(version_id < 1 || version_id > 1) {
        /* Only stream version 1 is understood */
        error_report("htab_load() bad version");
        return -EINVAL;
    }

    section_hdr = qemu_get_be32(f);

    if (section_hdr == -1) {
        /* Source had no HPT (radix guest): drop ours too */
        spapr_free_hpt(spapr);
        return 0;
    }

    if (section_hdr) {
        /* First section gives the htab size */
        spapr_reallocate_hpt(spapr, section_hdr, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -EINVAL;
        }
        return 0;
    }

    if (!spapr->htab) {
        /* Kernel-held HPT: load chunks through KVM's HPT fd */
        assert(kvm_enabled());

        fd = kvmppc_get_htab_fd(true, 0, &local_err);
        if (fd < 0) {
            error_report_err(local_err);
            return fd;
        }
    }

    while (true) {
        uint32_t index;
        uint16_t n_valid, n_invalid;

        index = qemu_get_be32(f);
        n_valid = qemu_get_be16(f);
        n_invalid = qemu_get_be16(f);

        if ((index == 0) && (n_valid == 0) && (n_invalid == 0)) {
            /* End of Stream */
            break;
        }

        if ((index +
n_valid + n_invalid) >
            (HTAB_SIZE(spapr) / HASH_PTE_SIZE_64)) {
            /* Bad index in stream */
            error_report(
                "htab_load() bad index %d (%hd+%hd entries) in htab stream (htab_shift=%d)",
                index, n_valid, n_invalid, spapr->htab_shift);
            return -EINVAL;
        }

        if (spapr->htab) {
            /* QEMU-held HPT: copy valid entries, zero the invalid run */
            if (n_valid) {
                qemu_get_buffer(f, HPTE(spapr->htab, index),
                                HASH_PTE_SIZE_64 * n_valid);
            }
            if (n_invalid) {
                memset(HPTE(spapr->htab, index + n_valid), 0,
                       HASH_PTE_SIZE_64 * n_invalid);
            }
        } else {
            int rc;

            assert(fd >= 0);

            rc = kvmppc_load_htab_chunk(f, fd, index, n_valid, n_invalid);
            if (rc < 0) {
                return rc;
            }
        }
    }

    if (!spapr->htab) {
        assert(fd >= 0);
        close(fd);
    }

    return 0;
}

/* save_cleanup hook: release the cached KVM HPT fd, if any */
static void htab_save_cleanup(void *opaque)
{
    SpaprMachineState *spapr = opaque;

    close_htab_fd(spapr);
}
/* Iterative-migration handler table for the HPT section */
static SaveVMHandlers savevm_htab_handlers = {
    .save_setup = htab_save_setup,
    .save_live_iterate = htab_save_iterate,
    .save_live_complete_precopy = htab_save_complete,
    .save_cleanup = htab_save_cleanup,
    .load_state = htab_load,
};

/* boot_set callback: record the (monitor-changed) boot device order */
static void spapr_boot_set(void *opaque, const char *boot_device,
                           Error **errp)
{
    MachineState *machine = MACHINE(opaque);
    machine->boot_order = g_strdup(boot_device);
}

/*
 * Create one DR connector per 256 MiB logical memory block (LMB) of
 * hotpluggable memory (maxram - ram), addressed from the device_memory
 * base.
 */
static void spapr_create_lmb_dr_connectors(SpaprMachineState *spapr)
{
    MachineState *machine = MACHINE(spapr);
    uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE;
    uint32_t nr_lmbs = (machine->maxram_size - machine->ram_size)/lmb_size;
    int i;

    for (i = 0; i < nr_lmbs; i++) {
        uint64_t addr;

        addr = i * lmb_size + machine->device_memory->base;
        spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_LMB,
                               addr / lmb_size);
    }
}

/*
 * If RAM size, maxmem size and individual node mem sizes aren't aligned
 * to SPAPR_MEMORY_BLOCK_SIZE(256MB), then refuse to start the guest
 * since we can't support such unaligned sizes with DRCONF_MEMORY.
 */
static void spapr_validate_node_memory(MachineState *machine, Error **errp)
{
    int i;

    if (machine->ram_size % SPAPR_MEMORY_BLOCK_SIZE) {
        error_setg(errp, "Memory size 0x" RAM_ADDR_FMT
                   " is not aligned to %" PRIu64 " MiB",
                   machine->ram_size,
                   SPAPR_MEMORY_BLOCK_SIZE / MiB);
        return;
    }

    if (machine->maxram_size % SPAPR_MEMORY_BLOCK_SIZE) {
        /* NOTE(review): this branch checks maxram_size but prints
         * machine->ram_size in the message — looks like a copy/paste
         * slip; the reported value can mislead users.  Left untouched
         * here since it changes a user-visible message. */
        error_setg(errp, "Maximum memory size 0x" RAM_ADDR_FMT
                   " is not aligned to %" PRIu64 " MiB",
                   machine->ram_size,
                   SPAPR_MEMORY_BLOCK_SIZE / MiB);
        return;
    }

    for (i = 0; i < machine->numa_state->num_nodes; i++) {
        if (machine->numa_state->nodes[i].node_mem % SPAPR_MEMORY_BLOCK_SIZE) {
            error_setg(errp,
                       "Node %d memory size 0x%" PRIx64
                       " is not aligned to %" PRIu64 " MiB",
                       i, machine->numa_state->nodes[i].node_mem,
                       SPAPR_MEMORY_BLOCK_SIZE / MiB);
            return;
        }
    }
}

/* find cpu slot in machine->possible_cpus by core_id */
static CPUArchId *spapr_find_cpu_slot(MachineState *ms, uint32_t id, int *idx)
{
    int index = id / ms->smp.threads;
    if (index >= ms->possible_cpus->len) {
        return NULL;
    }
    if (idx) {
        /* Also report the slot index to the caller when requested */
        *idx = index;
    }
    return &ms->possible_cpus->cpus[index];
}

/*
 * Pick and (under KVM) apply the VSMT mode: the number of vcpu ids
 * reserved per core.  Honours an explicit user setting, otherwise
 * falls back to a machine-class default.  Reports via errp on failure.
 */
static void spapr_set_vsmt_mode(SpaprMachineState *spapr, Error **errp)
{
    MachineState *ms = MACHINE(spapr);
    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
    Error *local_err = NULL;
    bool vsmt_user = !!spapr->vsmt;
    int kvm_smt = kvmppc_smt_threads();
    int ret;
    unsigned int smp_threads = ms->smp.threads;

    if (!kvm_enabled() && (smp_threads > 1)) {
        error_setg(errp, "TCG cannot support more than 1 thread/core "
                   "on a pseries machine");
        return;
    }
    if (!is_power_of_2(smp_threads)) {
        error_setg(errp, "Cannot support %d threads/core on a pseries "
                   "machine because it must be a power of 2", smp_threads);
        return;
    }

    /* Determine the VSMT mode to use: */
    if (vsmt_user) {
        if (spapr->vsmt < smp_threads) {
            error_setg(errp, "Cannot support VSMT mode %d"
                       " because it must be >= threads/core (%d)",
                       spapr->vsmt, smp_threads);
            return;
        }
        /* In this case,
spapr->vsmt has been set by the command line */ 247329cb4187SGreg Kurz } else if (!smc->smp_threads_vsmt) { 24748904e5a7SDavid Gibson /* 24758904e5a7SDavid Gibson * Default VSMT value is tricky, because we need it to be as 24768904e5a7SDavid Gibson * consistent as possible (for migration), but this requires 24778904e5a7SDavid Gibson * changing it for at least some existing cases. We pick 8 as 24788904e5a7SDavid Gibson * the value that we'd get with KVM on POWER8, the 24798904e5a7SDavid Gibson * overwhelmingly common case in production systems. 24808904e5a7SDavid Gibson */ 24814ad64cbdSLaurent Vivier spapr->vsmt = MAX(8, smp_threads); 248229cb4187SGreg Kurz } else { 248329cb4187SGreg Kurz spapr->vsmt = smp_threads; 2484fa98fbfcSSam Bobroff } 2485fa98fbfcSSam Bobroff 2486fa98fbfcSSam Bobroff /* KVM: If necessary, set the SMT mode: */ 2487fa98fbfcSSam Bobroff if (kvm_enabled() && (spapr->vsmt != kvm_smt)) { 2488fa98fbfcSSam Bobroff ret = kvmppc_set_smt_threads(spapr->vsmt); 2489fa98fbfcSSam Bobroff if (ret) { 24901f20f2e0SDavid Gibson /* Looks like KVM isn't able to change VSMT mode */ 2491fa98fbfcSSam Bobroff error_setg(&local_err, 2492fa98fbfcSSam Bobroff "Failed to set KVM's VSMT mode to %d (errno %d)", 2493fa98fbfcSSam Bobroff spapr->vsmt, ret); 24941f20f2e0SDavid Gibson /* We can live with that if the default one is big enough 24951f20f2e0SDavid Gibson * for the number of threads, and a submultiple of the one 24961f20f2e0SDavid Gibson * we want. 
In this case we'll waste some vcpu ids, but 24971f20f2e0SDavid Gibson * behaviour will be correct */ 24981f20f2e0SDavid Gibson if ((kvm_smt >= smp_threads) && ((spapr->vsmt % kvm_smt) == 0)) { 24991f20f2e0SDavid Gibson warn_report_err(local_err); 25001f20f2e0SDavid Gibson } else { 2501fa98fbfcSSam Bobroff if (!vsmt_user) { 25021f20f2e0SDavid Gibson error_append_hint(&local_err, 25031f20f2e0SDavid Gibson "On PPC, a VM with %d threads/core" 25041f20f2e0SDavid Gibson " on a host with %d threads/core" 25051f20f2e0SDavid Gibson " requires the use of VSMT mode %d.\n", 2506fa98fbfcSSam Bobroff smp_threads, kvm_smt, spapr->vsmt); 2507fa98fbfcSSam Bobroff } 2508cdcca22aSVladimir Sementsov-Ogievskiy kvmppc_error_append_smt_possible_hint(&local_err); 2509dcfe4805SMarkus Armbruster error_propagate(errp, local_err); 2510fa98fbfcSSam Bobroff } 2511fa98fbfcSSam Bobroff } 25121f20f2e0SDavid Gibson } 2513fa98fbfcSSam Bobroff /* else TCG: nothing to do currently */ 2514fa98fbfcSSam Bobroff } 2515fa98fbfcSSam Bobroff 2516ce2918cbSDavid Gibson static void spapr_init_cpus(SpaprMachineState *spapr) 25171a5008fcSGreg Kurz { 25181a5008fcSGreg Kurz MachineState *machine = MACHINE(spapr); 25191a5008fcSGreg Kurz MachineClass *mc = MACHINE_GET_CLASS(machine); 2520ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine); 25211a5008fcSGreg Kurz const char *type = spapr_get_cpu_core_type(machine->cpu_type); 25221a5008fcSGreg Kurz const CPUArchIdList *possible_cpus; 2523fe6b6346SLike Xu unsigned int smp_cpus = machine->smp.cpus; 2524fe6b6346SLike Xu unsigned int smp_threads = machine->smp.threads; 2525fe6b6346SLike Xu unsigned int max_cpus = machine->smp.max_cpus; 25261a5008fcSGreg Kurz int boot_cores_nr = smp_cpus / smp_threads; 25271a5008fcSGreg Kurz int i; 25281a5008fcSGreg Kurz 25291a5008fcSGreg Kurz possible_cpus = mc->possible_cpu_arch_ids(machine); 25301a5008fcSGreg Kurz if (mc->has_hotpluggable_cpus) { 25311a5008fcSGreg Kurz if (smp_cpus % smp_threads) { 
25321a5008fcSGreg Kurz error_report("smp_cpus (%u) must be multiple of threads (%u)", 25331a5008fcSGreg Kurz smp_cpus, smp_threads); 25341a5008fcSGreg Kurz exit(1); 25351a5008fcSGreg Kurz } 25361a5008fcSGreg Kurz if (max_cpus % smp_threads) { 25371a5008fcSGreg Kurz error_report("max_cpus (%u) must be multiple of threads (%u)", 25381a5008fcSGreg Kurz max_cpus, smp_threads); 25391a5008fcSGreg Kurz exit(1); 25401a5008fcSGreg Kurz } 25411a5008fcSGreg Kurz } else { 25421a5008fcSGreg Kurz if (max_cpus != smp_cpus) { 25431a5008fcSGreg Kurz error_report("This machine version does not support CPU hotplug"); 25441a5008fcSGreg Kurz exit(1); 25451a5008fcSGreg Kurz } 25461a5008fcSGreg Kurz boot_cores_nr = possible_cpus->len; 25471a5008fcSGreg Kurz } 25481a5008fcSGreg Kurz 25491a5008fcSGreg Kurz if (smc->pre_2_10_has_unused_icps) { 25501a5008fcSGreg Kurz int i; 25511a5008fcSGreg Kurz 25521a518e76SCédric Le Goater for (i = 0; i < spapr_max_server_number(spapr); i++) { 25531a5008fcSGreg Kurz /* Dummy entries get deregistered when real ICPState objects 25541a5008fcSGreg Kurz * are registered during CPU core hotplug. 
25551a5008fcSGreg Kurz */ 25561a5008fcSGreg Kurz pre_2_10_vmstate_register_dummy_icp(i); 25571a5008fcSGreg Kurz } 25581a5008fcSGreg Kurz } 25591a5008fcSGreg Kurz 25601a5008fcSGreg Kurz for (i = 0; i < possible_cpus->len; i++) { 25611a5008fcSGreg Kurz int core_id = i * smp_threads; 25621a5008fcSGreg Kurz 25631a5008fcSGreg Kurz if (mc->has_hotpluggable_cpus) { 25641a5008fcSGreg Kurz spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_CPU, 25651a5008fcSGreg Kurz spapr_vcpu_id(spapr, core_id)); 25661a5008fcSGreg Kurz } 25671a5008fcSGreg Kurz 25681a5008fcSGreg Kurz if (i < boot_cores_nr) { 25691a5008fcSGreg Kurz Object *core = object_new(type); 25701a5008fcSGreg Kurz int nr_threads = smp_threads; 25711a5008fcSGreg Kurz 25721a5008fcSGreg Kurz /* Handle the partially filled core for older machine types */ 25731a5008fcSGreg Kurz if ((i + 1) * smp_threads >= smp_cpus) { 25741a5008fcSGreg Kurz nr_threads = smp_cpus - i * smp_threads; 25751a5008fcSGreg Kurz } 25761a5008fcSGreg Kurz 25775325cc34SMarkus Armbruster object_property_set_int(core, "nr-threads", nr_threads, 25781a5008fcSGreg Kurz &error_fatal); 25795325cc34SMarkus Armbruster object_property_set_int(core, CPU_CORE_PROP_CORE_ID, core_id, 25801a5008fcSGreg Kurz &error_fatal); 2581ce189ab2SMarkus Armbruster qdev_realize(DEVICE(core), NULL, &error_fatal); 2582ecda255eSSam Bobroff 2583ecda255eSSam Bobroff object_unref(core); 25841a5008fcSGreg Kurz } 25851a5008fcSGreg Kurz } 25861a5008fcSGreg Kurz } 25871a5008fcSGreg Kurz 2588999c9cafSGreg Kurz static PCIHostState *spapr_create_default_phb(void) 2589999c9cafSGreg Kurz { 2590999c9cafSGreg Kurz DeviceState *dev; 2591999c9cafSGreg Kurz 25923e80f690SMarkus Armbruster dev = qdev_new(TYPE_SPAPR_PCI_HOST_BRIDGE); 2593999c9cafSGreg Kurz qdev_prop_set_uint32(dev, "index", 0); 25943c6ef471SMarkus Armbruster sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); 2595999c9cafSGreg Kurz 2596999c9cafSGreg Kurz return PCI_HOST_BRIDGE(dev); 2597999c9cafSGreg Kurz } 
2598999c9cafSGreg Kurz 2599425f0b7aSDavid Gibson static hwaddr spapr_rma_size(SpaprMachineState *spapr, Error **errp) 2600425f0b7aSDavid Gibson { 2601425f0b7aSDavid Gibson MachineState *machine = MACHINE(spapr); 2602425f0b7aSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); 2603425f0b7aSDavid Gibson hwaddr rma_size = machine->ram_size; 2604425f0b7aSDavid Gibson hwaddr node0_size = spapr_node0_size(machine); 2605425f0b7aSDavid Gibson 2606425f0b7aSDavid Gibson /* RMA has to fit in the first NUMA node */ 2607425f0b7aSDavid Gibson rma_size = MIN(rma_size, node0_size); 2608425f0b7aSDavid Gibson 2609425f0b7aSDavid Gibson /* 2610425f0b7aSDavid Gibson * VRMA access is via a special 1TiB SLB mapping, so the RMA can 2611425f0b7aSDavid Gibson * never exceed that 2612425f0b7aSDavid Gibson */ 2613425f0b7aSDavid Gibson rma_size = MIN(rma_size, 1 * TiB); 2614425f0b7aSDavid Gibson 2615425f0b7aSDavid Gibson /* 2616425f0b7aSDavid Gibson * Clamp the RMA size based on machine type. This is for 2617425f0b7aSDavid Gibson * migration compatibility with older qemu versions, which limited 2618425f0b7aSDavid Gibson * the RMA size for complicated and mostly bad reasons. 
2619425f0b7aSDavid Gibson */ 2620425f0b7aSDavid Gibson if (smc->rma_limit) { 2621425f0b7aSDavid Gibson rma_size = MIN(rma_size, smc->rma_limit); 2622425f0b7aSDavid Gibson } 2623425f0b7aSDavid Gibson 2624425f0b7aSDavid Gibson if (rma_size < MIN_RMA_SLOF) { 2625425f0b7aSDavid Gibson error_setg(errp, 2626425f0b7aSDavid Gibson "pSeries SLOF firmware requires >= %" HWADDR_PRIx 2627425f0b7aSDavid Gibson "ldMiB guest RMA (Real Mode Area memory)", 2628425f0b7aSDavid Gibson MIN_RMA_SLOF / MiB); 2629425f0b7aSDavid Gibson return 0; 2630425f0b7aSDavid Gibson } 2631425f0b7aSDavid Gibson 2632425f0b7aSDavid Gibson return rma_size; 2633425f0b7aSDavid Gibson } 2634425f0b7aSDavid Gibson 263553018216SPaolo Bonzini /* pSeries LPAR / sPAPR hardware init */ 2636bcb5ce08SDavid Gibson static void spapr_machine_init(MachineState *machine) 263753018216SPaolo Bonzini { 2638ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(machine); 2639ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine); 2640ee3a71e3SShivaprasad G Bhat MachineClass *mc = MACHINE_GET_CLASS(machine); 26413ef96221SMarcel Apfelbaum const char *kernel_filename = machine->kernel_filename; 26423ef96221SMarcel Apfelbaum const char *initrd_filename = machine->initrd_filename; 264353018216SPaolo Bonzini PCIHostState *phb; 264453018216SPaolo Bonzini int i; 264553018216SPaolo Bonzini MemoryRegion *sysmem = get_system_memory(); 2646b7d1f77aSBenjamin Herrenschmidt long load_limit, fw_size; 264753018216SPaolo Bonzini char *filename; 264830f4b05bSDavid Gibson Error *resize_hpt_err = NULL; 264953018216SPaolo Bonzini 2650226419d6SMichael S. 
Tsirkin msi_nonbroken = true; 265153018216SPaolo Bonzini 265253018216SPaolo Bonzini QLIST_INIT(&spapr->phbs); 26530cffce56SDavid Gibson QTAILQ_INIT(&spapr->pending_dimm_unplugs); 265453018216SPaolo Bonzini 26559f6edd06SDavid Gibson /* Determine capabilities to run with */ 26569f6edd06SDavid Gibson spapr_caps_init(spapr); 26579f6edd06SDavid Gibson 265830f4b05bSDavid Gibson kvmppc_check_papr_resize_hpt(&resize_hpt_err); 265930f4b05bSDavid Gibson if (spapr->resize_hpt == SPAPR_RESIZE_HPT_DEFAULT) { 266030f4b05bSDavid Gibson /* 266130f4b05bSDavid Gibson * If the user explicitly requested a mode we should either 266230f4b05bSDavid Gibson * supply it, or fail completely (which we do below). But if 266330f4b05bSDavid Gibson * it's not set explicitly, we reset our mode to something 266430f4b05bSDavid Gibson * that works 266530f4b05bSDavid Gibson */ 266630f4b05bSDavid Gibson if (resize_hpt_err) { 266730f4b05bSDavid Gibson spapr->resize_hpt = SPAPR_RESIZE_HPT_DISABLED; 266830f4b05bSDavid Gibson error_free(resize_hpt_err); 266930f4b05bSDavid Gibson resize_hpt_err = NULL; 267030f4b05bSDavid Gibson } else { 267130f4b05bSDavid Gibson spapr->resize_hpt = smc->resize_hpt_default; 267230f4b05bSDavid Gibson } 267330f4b05bSDavid Gibson } 267430f4b05bSDavid Gibson 267530f4b05bSDavid Gibson assert(spapr->resize_hpt != SPAPR_RESIZE_HPT_DEFAULT); 267630f4b05bSDavid Gibson 267730f4b05bSDavid Gibson if ((spapr->resize_hpt != SPAPR_RESIZE_HPT_DISABLED) && resize_hpt_err) { 267830f4b05bSDavid Gibson /* 267930f4b05bSDavid Gibson * User requested HPT resize, but this host can't supply it. 
Bail out 268030f4b05bSDavid Gibson */ 268130f4b05bSDavid Gibson error_report_err(resize_hpt_err); 268230f4b05bSDavid Gibson exit(1); 268330f4b05bSDavid Gibson } 268414963c34SMarkus Armbruster error_free(resize_hpt_err); 268530f4b05bSDavid Gibson 2686425f0b7aSDavid Gibson spapr->rma_size = spapr_rma_size(spapr, &error_fatal); 2687c4177479SAlexey Kardashevskiy 2688b7d1f77aSBenjamin Herrenschmidt /* Setup a load limit for the ramdisk leaving room for SLOF and FDT */ 2689b7d1f77aSBenjamin Herrenschmidt load_limit = MIN(spapr->rma_size, RTAS_MAX_ADDR) - FW_OVERHEAD; 269053018216SPaolo Bonzini 2691482969d6SCédric Le Goater /* 2692482969d6SCédric Le Goater * VSMT must be set in order to be able to compute VCPU ids, ie to 26931a518e76SCédric Le Goater * call spapr_max_server_number() or spapr_vcpu_id(). 2694482969d6SCédric Le Goater */ 2695482969d6SCédric Le Goater spapr_set_vsmt_mode(spapr, &error_fatal); 2696482969d6SCédric Le Goater 26977b565160SDavid Gibson /* Set up Interrupt Controller before we create the VCPUs */ 2698fab397d8SCédric Le Goater spapr_irq_init(spapr, &error_fatal); 26997b565160SDavid Gibson 2700dc1b5eeeSGreg Kurz /* Set up containers for ibm,client-architecture-support negotiated options 2701dc1b5eeeSGreg Kurz */ 2702facdb8b6SMichael Roth spapr->ov5 = spapr_ovec_new(); 2703facdb8b6SMichael Roth spapr->ov5_cas = spapr_ovec_new(); 2704facdb8b6SMichael Roth 2705224245bfSDavid Gibson if (smc->dr_lmb_enabled) { 2706facdb8b6SMichael Roth spapr_ovec_set(spapr->ov5, OV5_DRCONF_MEMORY); 27077c150d6fSDavid Gibson spapr_validate_node_memory(machine, &error_fatal); 2708224245bfSDavid Gibson } 2709224245bfSDavid Gibson 2710417ece33SMichael Roth spapr_ovec_set(spapr->ov5, OV5_FORM1_AFFINITY); 2711417ece33SMichael Roth 2712ffbb1705SMichael Roth /* advertise support for dedicated HP event source to guests */ 2713ffbb1705SMichael Roth if (spapr->use_hotplug_event_source) { 2714ffbb1705SMichael Roth spapr_ovec_set(spapr->ov5, OV5_HP_EVT); 2715ffbb1705SMichael Roth } 
2716ffbb1705SMichael Roth 27172772cf6bSDavid Gibson /* advertise support for HPT resizing */ 27182772cf6bSDavid Gibson if (spapr->resize_hpt != SPAPR_RESIZE_HPT_DISABLED) { 27192772cf6bSDavid Gibson spapr_ovec_set(spapr->ov5, OV5_HPT_RESIZE); 27202772cf6bSDavid Gibson } 27212772cf6bSDavid Gibson 2722a324d6f1SBharata B Rao /* advertise support for ibm,dyamic-memory-v2 */ 2723a324d6f1SBharata B Rao spapr_ovec_set(spapr->ov5, OV5_DRMEM_V2); 2724a324d6f1SBharata B Rao 2725db592b5bSCédric Le Goater /* advertise XIVE on POWER9 machines */ 2726ca62823bSDavid Gibson if (spapr->irq->xive) { 2727db592b5bSCédric Le Goater spapr_ovec_set(spapr->ov5, OV5_XIVE_EXPLOIT); 2728db592b5bSCédric Le Goater } 2729db592b5bSCédric Le Goater 273053018216SPaolo Bonzini /* init CPUs */ 27310c86d0fdSDavid Gibson spapr_init_cpus(spapr); 273253018216SPaolo Bonzini 273358c46efaSLaurent Vivier /* 273458c46efaSLaurent Vivier * check we don't have a memory-less/cpu-less NUMA node 273558c46efaSLaurent Vivier * Firmware relies on the existing memory/cpu topology to provide the 273658c46efaSLaurent Vivier * NUMA topology to the kernel. 273758c46efaSLaurent Vivier * And the linux kernel needs to know the NUMA topology at start 273858c46efaSLaurent Vivier * to be able to hotplug CPUs later. 
273958c46efaSLaurent Vivier */ 274058c46efaSLaurent Vivier if (machine->numa_state->num_nodes) { 274158c46efaSLaurent Vivier for (i = 0; i < machine->numa_state->num_nodes; ++i) { 274258c46efaSLaurent Vivier /* check for memory-less node */ 274358c46efaSLaurent Vivier if (machine->numa_state->nodes[i].node_mem == 0) { 274458c46efaSLaurent Vivier CPUState *cs; 274558c46efaSLaurent Vivier int found = 0; 274658c46efaSLaurent Vivier /* check for cpu-less node */ 274758c46efaSLaurent Vivier CPU_FOREACH(cs) { 274858c46efaSLaurent Vivier PowerPCCPU *cpu = POWERPC_CPU(cs); 274958c46efaSLaurent Vivier if (cpu->node_id == i) { 275058c46efaSLaurent Vivier found = 1; 275158c46efaSLaurent Vivier break; 275258c46efaSLaurent Vivier } 275358c46efaSLaurent Vivier } 275458c46efaSLaurent Vivier /* memory-less and cpu-less node */ 275558c46efaSLaurent Vivier if (!found) { 275658c46efaSLaurent Vivier error_report( 275758c46efaSLaurent Vivier "Memory-less/cpu-less nodes are not supported (node %d)", 275858c46efaSLaurent Vivier i); 275958c46efaSLaurent Vivier exit(1); 276058c46efaSLaurent Vivier } 276158c46efaSLaurent Vivier } 276258c46efaSLaurent Vivier } 276358c46efaSLaurent Vivier 276458c46efaSLaurent Vivier } 276558c46efaSLaurent Vivier 2766db5127b2SDavid Gibson /* 2767db5127b2SDavid Gibson * NVLink2-connected GPU RAM needs to be placed on a separate NUMA node. 2768db5127b2SDavid Gibson * We assign a new numa ID per GPU in spapr_pci_collect_nvgpu() which is 2769db5127b2SDavid Gibson * called from vPHB reset handler so we initialize the counter here. 2770db5127b2SDavid Gibson * If no NUMA is configured from the QEMU side, we start from 1 as GPU RAM 2771db5127b2SDavid Gibson * must be equally distant from any other node. 2772db5127b2SDavid Gibson * The final value of spapr->gpu_numa_id is going to be written to 2773db5127b2SDavid Gibson * max-associativity-domains in spapr_build_fdt(). 
2774db5127b2SDavid Gibson */ 2775db5127b2SDavid Gibson spapr->gpu_numa_id = MAX(1, machine->numa_state->num_nodes); 2776db5127b2SDavid Gibson 2777f1aa45ffSDaniel Henrique Barboza /* Init numa_assoc_array */ 2778f1aa45ffSDaniel Henrique Barboza spapr_numa_associativity_init(spapr, machine); 2779f1aa45ffSDaniel Henrique Barboza 27800550b120SGreg Kurz if ((!kvm_enabled() || kvmppc_has_cap_mmu_radix()) && 2781ad99d04cSDavid Gibson ppc_type_check_compat(machine->cpu_type, CPU_POWERPC_LOGICAL_3_00, 0, 27820550b120SGreg Kurz spapr->max_compat_pvr)) { 2783b4b83312SGreg Kurz spapr_ovec_set(spapr->ov5, OV5_MMU_RADIX_300); 27840550b120SGreg Kurz /* KVM and TCG always allow GTSE with radix... */ 27850550b120SGreg Kurz spapr_ovec_set(spapr->ov5, OV5_MMU_RADIX_GTSE); 27860550b120SGreg Kurz } 27870550b120SGreg Kurz /* ... but not with hash (currently). */ 27880550b120SGreg Kurz 2789026bfd89SDavid Gibson if (kvm_enabled()) { 2790026bfd89SDavid Gibson /* Enable H_LOGICAL_CI_* so SLOF can talk to in-kernel devices */ 2791026bfd89SDavid Gibson kvmppc_enable_logical_ci_hcalls(); 2792ef9971ddSAlexey Kardashevskiy kvmppc_enable_set_mode_hcall(); 27935145ad4fSNathan Whitehorn 27945145ad4fSNathan Whitehorn /* H_CLEAR_MOD/_REF are mandatory in PAPR, but off by default */ 27955145ad4fSNathan Whitehorn kvmppc_enable_clear_ref_mod_hcalls(); 279668f9f708SSuraj Jitindar Singh 279768f9f708SSuraj Jitindar Singh /* Enable H_PAGE_INIT */ 279868f9f708SSuraj Jitindar Singh kvmppc_enable_h_page_init(); 2799026bfd89SDavid Gibson } 2800026bfd89SDavid Gibson 2801ab74e543SIgor Mammedov /* map RAM */ 2802ab74e543SIgor Mammedov memory_region_add_subregion(sysmem, 0, machine->ram); 280353018216SPaolo Bonzini 2804b0c14ec4SDavid Hildenbrand /* always allocate the device memory information */ 2805b0c14ec4SDavid Hildenbrand machine->device_memory = g_malloc0(sizeof(*machine->device_memory)); 2806b0c14ec4SDavid Hildenbrand 28074a1c9cf0SBharata B Rao /* initialize hotplug memory address space */ 
28084a1c9cf0SBharata B Rao if (machine->ram_size < machine->maxram_size) { 28090c9269a5SDavid Hildenbrand ram_addr_t device_mem_size = machine->maxram_size - machine->ram_size; 281071c9a3ddSBharata B Rao /* 281171c9a3ddSBharata B Rao * Limit the number of hotpluggable memory slots to half the number 281271c9a3ddSBharata B Rao * slots that KVM supports, leaving the other half for PCI and other 281371c9a3ddSBharata B Rao * devices. However ensure that number of slots doesn't drop below 32. 281471c9a3ddSBharata B Rao */ 281571c9a3ddSBharata B Rao int max_memslots = kvm_enabled() ? kvm_get_max_memslots() / 2 : 281671c9a3ddSBharata B Rao SPAPR_MAX_RAM_SLOTS; 28174a1c9cf0SBharata B Rao 281871c9a3ddSBharata B Rao if (max_memslots < SPAPR_MAX_RAM_SLOTS) { 281971c9a3ddSBharata B Rao max_memslots = SPAPR_MAX_RAM_SLOTS; 282071c9a3ddSBharata B Rao } 282171c9a3ddSBharata B Rao if (machine->ram_slots > max_memslots) { 2822d54e4d76SDavid Gibson error_report("Specified number of memory slots %" 2823d54e4d76SDavid Gibson PRIu64" exceeds max supported %d", 282471c9a3ddSBharata B Rao machine->ram_slots, max_memslots); 2825d54e4d76SDavid Gibson exit(1); 28264a1c9cf0SBharata B Rao } 28274a1c9cf0SBharata B Rao 2828b0c14ec4SDavid Hildenbrand machine->device_memory->base = ROUND_UP(machine->ram_size, 28290c9269a5SDavid Hildenbrand SPAPR_DEVICE_MEM_ALIGN); 2830b0c14ec4SDavid Hildenbrand memory_region_init(&machine->device_memory->mr, OBJECT(spapr), 28310c9269a5SDavid Hildenbrand "device-memory", device_mem_size); 2832b0c14ec4SDavid Hildenbrand memory_region_add_subregion(sysmem, machine->device_memory->base, 2833b0c14ec4SDavid Hildenbrand &machine->device_memory->mr); 28344a1c9cf0SBharata B Rao } 28354a1c9cf0SBharata B Rao 2836224245bfSDavid Gibson if (smc->dr_lmb_enabled) { 2837224245bfSDavid Gibson spapr_create_lmb_dr_connectors(spapr); 2838224245bfSDavid Gibson } 2839224245bfSDavid Gibson 28408af7e1feSNicholas Piggin if (spapr_get_cap(spapr, SPAPR_CAP_FWNMI) == SPAPR_CAP_ON) { 
28412500fb42SAravinda Prasad /* Create the error string for live migration blocker */ 28422500fb42SAravinda Prasad error_setg(&spapr->fwnmi_migration_blocker, 28432500fb42SAravinda Prasad "A machine check is being handled during migration. The handler" 28442500fb42SAravinda Prasad "may run and log hardware error on the destination"); 28452500fb42SAravinda Prasad } 28462500fb42SAravinda Prasad 2847ee3a71e3SShivaprasad G Bhat if (mc->nvdimm_supported) { 2848ee3a71e3SShivaprasad G Bhat spapr_create_nvdimm_dr_connectors(spapr); 2849ee3a71e3SShivaprasad G Bhat } 2850ee3a71e3SShivaprasad G Bhat 2851ffbb1705SMichael Roth /* Set up RTAS event infrastructure */ 285253018216SPaolo Bonzini spapr_events_init(spapr); 285353018216SPaolo Bonzini 285412f42174SDavid Gibson /* Set up the RTC RTAS interfaces */ 285528df36a1SDavid Gibson spapr_rtc_create(spapr); 285612f42174SDavid Gibson 285753018216SPaolo Bonzini /* Set up VIO bus */ 285853018216SPaolo Bonzini spapr->vio_bus = spapr_vio_bus_init(); 285953018216SPaolo Bonzini 2860b8846a4dSPeter Maydell for (i = 0; i < serial_max_hds(); i++) { 28619bca0edbSPeter Maydell if (serial_hd(i)) { 28629bca0edbSPeter Maydell spapr_vty_create(spapr->vio_bus, serial_hd(i)); 286353018216SPaolo Bonzini } 286453018216SPaolo Bonzini } 286553018216SPaolo Bonzini 286653018216SPaolo Bonzini /* We always have at least the nvram device on VIO */ 286753018216SPaolo Bonzini spapr_create_nvram(spapr); 286853018216SPaolo Bonzini 2869962b6c36SMichael Roth /* 2870962b6c36SMichael Roth * Setup hotplug / dynamic-reconfiguration connectors. top-level 2871962b6c36SMichael Roth * connectors (described in root DT node's "ibm,drc-types" property) 2872962b6c36SMichael Roth * are pre-initialized here. additional child connectors (such as 2873962b6c36SMichael Roth * connectors for a PHBs PCI slots) are added as needed during their 2874962b6c36SMichael Roth * parent's realization. 
2875962b6c36SMichael Roth */ 2876962b6c36SMichael Roth if (smc->dr_phb_enabled) { 2877962b6c36SMichael Roth for (i = 0; i < SPAPR_MAX_PHBS; i++) { 2878962b6c36SMichael Roth spapr_dr_connector_new(OBJECT(machine), TYPE_SPAPR_DRC_PHB, i); 2879962b6c36SMichael Roth } 2880962b6c36SMichael Roth } 2881962b6c36SMichael Roth 288253018216SPaolo Bonzini /* Set up PCI */ 288353018216SPaolo Bonzini spapr_pci_rtas_init(); 288453018216SPaolo Bonzini 2885999c9cafSGreg Kurz phb = spapr_create_default_phb(); 288653018216SPaolo Bonzini 288753018216SPaolo Bonzini for (i = 0; i < nb_nics; i++) { 288853018216SPaolo Bonzini NICInfo *nd = &nd_table[i]; 288953018216SPaolo Bonzini 289053018216SPaolo Bonzini if (!nd->model) { 28913c3a4e7aSThomas Huth nd->model = g_strdup("spapr-vlan"); 289253018216SPaolo Bonzini } 289353018216SPaolo Bonzini 28943c3a4e7aSThomas Huth if (g_str_equal(nd->model, "spapr-vlan") || 28953c3a4e7aSThomas Huth g_str_equal(nd->model, "ibmveth")) { 289653018216SPaolo Bonzini spapr_vlan_create(spapr->vio_bus, nd); 289753018216SPaolo Bonzini } else { 289829b358f9SDavid Gibson pci_nic_init_nofail(&nd_table[i], phb->bus, nd->model, NULL); 289953018216SPaolo Bonzini } 290053018216SPaolo Bonzini } 290153018216SPaolo Bonzini 290253018216SPaolo Bonzini for (i = 0; i <= drive_get_max_bus(IF_SCSI); i++) { 290353018216SPaolo Bonzini spapr_vscsi_create(spapr->vio_bus); 290453018216SPaolo Bonzini } 290553018216SPaolo Bonzini 290653018216SPaolo Bonzini /* Graphics */ 290714c6a894SDavid Gibson if (spapr_vga_init(phb->bus, &error_fatal)) { 290853018216SPaolo Bonzini spapr->has_graphics = true; 2909c6e76503SPaolo Bonzini machine->usb |= defaults_enabled() && !machine->usb_disabled; 291053018216SPaolo Bonzini } 291153018216SPaolo Bonzini 29124ee9ced9SMarcel Apfelbaum if (machine->usb) { 291357040d45SThomas Huth if (smc->use_ohci_by_default) { 291453018216SPaolo Bonzini pci_create_simple(phb->bus, -1, "pci-ohci"); 291557040d45SThomas Huth } else { 291657040d45SThomas Huth 
pci_create_simple(phb->bus, -1, "nec-usb-xhci"); 291757040d45SThomas Huth } 2918c86580b8SMarkus Armbruster 291953018216SPaolo Bonzini if (spapr->has_graphics) { 2920c86580b8SMarkus Armbruster USBBus *usb_bus = usb_bus_find(-1); 2921c86580b8SMarkus Armbruster 2922c86580b8SMarkus Armbruster usb_create_simple(usb_bus, "usb-kbd"); 2923c86580b8SMarkus Armbruster usb_create_simple(usb_bus, "usb-mouse"); 292453018216SPaolo Bonzini } 292553018216SPaolo Bonzini } 292653018216SPaolo Bonzini 292753018216SPaolo Bonzini if (kernel_filename) { 29284366e1dbSLiam Merwick spapr->kernel_size = load_elf(kernel_filename, NULL, 292987262806SAlexey Kardashevskiy translate_kernel_address, spapr, 2930617160c9SBALATON Zoltan NULL, NULL, NULL, NULL, 1, 2931a19f7fb0SDavid Gibson PPC_ELF_MACHINE, 0, 0); 2932a19f7fb0SDavid Gibson if (spapr->kernel_size == ELF_LOAD_WRONG_ENDIAN) { 29334366e1dbSLiam Merwick spapr->kernel_size = load_elf(kernel_filename, NULL, 2934617160c9SBALATON Zoltan translate_kernel_address, spapr, 2935617160c9SBALATON Zoltan NULL, NULL, NULL, NULL, 0, 2936617160c9SBALATON Zoltan PPC_ELF_MACHINE, 0, 0); 2937a19f7fb0SDavid Gibson spapr->kernel_le = spapr->kernel_size > 0; 293816457e7fSBenjamin Herrenschmidt } 2939a19f7fb0SDavid Gibson if (spapr->kernel_size < 0) { 2940a19f7fb0SDavid Gibson error_report("error loading %s: %s", kernel_filename, 2941a19f7fb0SDavid Gibson load_elf_strerror(spapr->kernel_size)); 294253018216SPaolo Bonzini exit(1); 294353018216SPaolo Bonzini } 294453018216SPaolo Bonzini 294553018216SPaolo Bonzini /* load initrd */ 294653018216SPaolo Bonzini if (initrd_filename) { 294753018216SPaolo Bonzini /* Try to locate the initrd in the gap between the kernel 294853018216SPaolo Bonzini * and the firmware. 
Add a bit of space just in case 294953018216SPaolo Bonzini */ 295087262806SAlexey Kardashevskiy spapr->initrd_base = (spapr->kernel_addr + spapr->kernel_size 2951a19f7fb0SDavid Gibson + 0x1ffff) & ~0xffff; 2952a19f7fb0SDavid Gibson spapr->initrd_size = load_image_targphys(initrd_filename, 2953a19f7fb0SDavid Gibson spapr->initrd_base, 2954a19f7fb0SDavid Gibson load_limit 2955a19f7fb0SDavid Gibson - spapr->initrd_base); 2956a19f7fb0SDavid Gibson if (spapr->initrd_size < 0) { 2957d54e4d76SDavid Gibson error_report("could not load initial ram disk '%s'", 295853018216SPaolo Bonzini initrd_filename); 295953018216SPaolo Bonzini exit(1); 296053018216SPaolo Bonzini } 296153018216SPaolo Bonzini } 296253018216SPaolo Bonzini } 296353018216SPaolo Bonzini 29648e7ea787SAndreas Färber if (bios_name == NULL) { 29658e7ea787SAndreas Färber bios_name = FW_FILE_NAME; 29668e7ea787SAndreas Färber } 29678e7ea787SAndreas Färber filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); 29684c56440dSStefan Weil if (!filename) { 296968fea5a0SThomas Huth error_report("Could not find LPAR firmware '%s'", bios_name); 29704c56440dSStefan Weil exit(1); 29714c56440dSStefan Weil } 297253018216SPaolo Bonzini fw_size = load_image_targphys(filename, 0, FW_MAX_SIZE); 297368fea5a0SThomas Huth if (fw_size <= 0) { 297468fea5a0SThomas Huth error_report("Could not load LPAR firmware '%s'", filename); 297553018216SPaolo Bonzini exit(1); 297653018216SPaolo Bonzini } 297753018216SPaolo Bonzini g_free(filename); 297853018216SPaolo Bonzini 297928e02042SDavid Gibson /* FIXME: Should register things through the MachineState's qdev 298028e02042SDavid Gibson * interface, this is a legacy from the sPAPREnvironment structure 298128e02042SDavid Gibson * which predated MachineState but had a similar function */ 29824be21d56SDavid Gibson vmstate_register(NULL, 0, &vmstate_spapr, spapr); 29831df2c9a2SPeter Xu register_savevm_live("spapr/htab", VMSTATE_INSTANCE_ID_ANY, 1, 29844be21d56SDavid Gibson &savevm_htab_handlers, 
spapr); 29854be21d56SDavid Gibson 29869bc6bfdfSMarkus Armbruster qbus_set_hotplug_handler(sysbus_get_default(), OBJECT(machine)); 2987bb2bdd81SGreg Kurz 29885b2128d2SAlexander Graf qemu_register_boot_set(spapr_boot_set, spapr); 298942043e4fSLaurent Vivier 299093eac7b8SNicholas Piggin /* 299193eac7b8SNicholas Piggin * Nothing needs to be done to resume a suspended guest because 299293eac7b8SNicholas Piggin * suspending does not change the machine state, so no need for 299393eac7b8SNicholas Piggin * a ->wakeup method. 299493eac7b8SNicholas Piggin */ 299593eac7b8SNicholas Piggin qemu_register_wakeup_support(); 299693eac7b8SNicholas Piggin 299742043e4fSLaurent Vivier if (kvm_enabled()) { 29983dc410aeSAlexey Kardashevskiy /* to stop and start vmclock */ 299942043e4fSLaurent Vivier qemu_add_vm_change_state_handler(cpu_ppc_clock_vm_state_change, 300042043e4fSLaurent Vivier &spapr->tb); 30013dc410aeSAlexey Kardashevskiy 30023dc410aeSAlexey Kardashevskiy kvmppc_spapr_enable_inkernel_multitce(); 300342043e4fSLaurent Vivier } 30049ac703acSAravinda Prasad 30058af7e1feSNicholas Piggin qemu_cond_init(&spapr->fwnmi_machine_check_interlock_cond); 300653018216SPaolo Bonzini } 300753018216SPaolo Bonzini 3008dc0ca80eSEric Auger static int spapr_kvm_type(MachineState *machine, const char *vm_type) 3009135a129aSAneesh Kumar K.V { 3010135a129aSAneesh Kumar K.V if (!vm_type) { 3011135a129aSAneesh Kumar K.V return 0; 3012135a129aSAneesh Kumar K.V } 3013135a129aSAneesh Kumar K.V 3014135a129aSAneesh Kumar K.V if (!strcmp(vm_type, "HV")) { 3015135a129aSAneesh Kumar K.V return 1; 3016135a129aSAneesh Kumar K.V } 3017135a129aSAneesh Kumar K.V 3018135a129aSAneesh Kumar K.V if (!strcmp(vm_type, "PR")) { 3019135a129aSAneesh Kumar K.V return 2; 3020135a129aSAneesh Kumar K.V } 3021135a129aSAneesh Kumar K.V 3022135a129aSAneesh Kumar K.V error_report("Unknown kvm-type specified '%s'", vm_type); 3023135a129aSAneesh Kumar K.V exit(1); 3024135a129aSAneesh Kumar K.V } 3025135a129aSAneesh Kumar K.V 
/*
 * Implementation of an interface to adjust firmware path
 * for the bootindex property handling.
 */
/*
 * Build the OpenFirmware device-path component SLOF expects for a given
 * device, encoding SCSI/virtio/USB/vhost LUNs in SRP form.  Returns a
 * newly-allocated string (caller frees), or NULL to fall back to the
 * default qdev firmware naming.
 */
static char *spapr_get_fw_dev_path(FWPathProvider *p, BusState *bus,
                                   DeviceState *dev)
{
/* Safe downcast helper: NULL when obj is not an instance of 'name'. */
#define CAST(type, obj, name) \
    ((type *)object_dynamic_cast(OBJECT(obj), (name)))
    SCSIDevice *d = CAST(SCSIDevice, dev, TYPE_SCSI_DEVICE);
    SpaprPhbState *phb = CAST(SpaprPhbState, dev, TYPE_SPAPR_PCI_HOST_BRIDGE);
    VHostSCSICommon *vsc = CAST(VHostSCSICommon, dev, TYPE_VHOST_SCSI_COMMON);

    if (d) {
        /* the parent bus type decides which LUN encoding applies */
        void *spapr = CAST(void, bus->parent, "spapr-vscsi");
        VirtIOSCSI *virtio = CAST(VirtIOSCSI, bus->parent, TYPE_VIRTIO_SCSI);
        USBDevice *usb = CAST(USBDevice, bus->parent, TYPE_USB_DEVICE);

        if (spapr) {
            /*
             * Replace "channel@0/disk@0,0" with "disk@8000000000000000":
             * In the top 16 bits of the 64-bit LUN, we use SRP luns of the form
             * 0x8000 | (target << 8) | (bus << 5) | lun
             * (see the "Logical unit addressing format" table in SAM5)
             */
            unsigned id = 0x8000 | (d->id << 8) | (d->channel << 5) | d->lun;
            return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
                                   (uint64_t)id << 48);
        } else if (virtio) {
            /*
             * We use SRP luns of the form 01000000 | (target << 8) | lun
             * in the top 32 bits of the 64-bit LUN
             * Note: the quote above is from SLOF and it is wrong,
             * the actual binding is:
             * swap 0100 or 10 << or 20 << ( target lun-id -- srplun )
             */
            unsigned id = 0x1000000 | (d->id << 16) | d->lun;
            if (d->lun >= 256) {
                /* Use the LUN "flat space addressing method" */
                id |= 0x4000;
            }
            return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
                                   (uint64_t)id << 32);
        } else if (usb) {
            /*
             * We use SRP luns of the form 01000000 | (usb-port << 16) | lun
             * in the top 32 bits of the 64-bit LUN
             */
            unsigned usb_port = atoi(usb->port->path);
            unsigned id = 0x1000000 | (usb_port << 16) | d->lun;
            return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
                                   (uint64_t)id << 32);
        }
    }

    /*
     * SLOF probes the USB devices, and if it recognizes that the device is a
     * storage device, it changes its name to "storage" instead of "usb-host",
     * and additionally adds a child node for the SCSI LUN, so the correct
     * boot path in SLOF is something like .../storage@1/disk@xxx" instead.
     */
    if (strcmp("usb-host", qdev_fw_name(dev)) == 0) {
        USBDevice *usbdev = CAST(USBDevice, dev, TYPE_USB_DEVICE);
        if (usb_host_dev_is_scsi_storage(usbdev)) {
            return g_strdup_printf("storage@%s/disk", usbdev->port->path);
        }
    }

    if (phb) {
        /* Replace "pci" with "pci@800000020000000" */
        return g_strdup_printf("pci@%"PRIX64, phb->buid);
    }

    if (vsc) {
        /* Same logic as virtio above */
        unsigned id = 0x1000000 | (vsc->target << 16) | vsc->lun;
        return g_strdup_printf("disk@%"PRIX64, (uint64_t)id << 32);
    }

    if (g_str_equal("pci-bridge", qdev_fw_name(dev))) {
        /* SLOF uses "pci" instead of "pci-bridge" for PCI bridges */
        PCIDevice *pcidev = CAST(PCIDevice, dev, TYPE_PCI_DEVICE);
        return g_strdup_printf("pci@%x", PCI_SLOT(pcidev->devfn));
    }

    return NULL;
}

/* Getter for the machine's "kvm-type" property; returns a copy (caller frees). */
static char *spapr_get_kvm_type(Object *obj, Error **errp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(obj);

    return g_strdup(spapr->kvm_type);
}

/* NOTE(review): declaration continues on the following source line. */
static void
spapr_set_kvm_type(Object *obj, const char *value, Error **errp) 312223825581SEduardo Habkost { 3123ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(obj); 312423825581SEduardo Habkost 312528e02042SDavid Gibson g_free(spapr->kvm_type); 312628e02042SDavid Gibson spapr->kvm_type = g_strdup(value); 312723825581SEduardo Habkost } 312823825581SEduardo Habkost 3129f6229214SMichael Roth static bool spapr_get_modern_hotplug_events(Object *obj, Error **errp) 3130f6229214SMichael Roth { 3131ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(obj); 3132f6229214SMichael Roth 3133f6229214SMichael Roth return spapr->use_hotplug_event_source; 3134f6229214SMichael Roth } 3135f6229214SMichael Roth 3136f6229214SMichael Roth static void spapr_set_modern_hotplug_events(Object *obj, bool value, 3137f6229214SMichael Roth Error **errp) 3138f6229214SMichael Roth { 3139ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(obj); 3140f6229214SMichael Roth 3141f6229214SMichael Roth spapr->use_hotplug_event_source = value; 3142f6229214SMichael Roth } 3143f6229214SMichael Roth 3144fcad0d21SAlexey Kardashevskiy static bool spapr_get_msix_emulation(Object *obj, Error **errp) 3145fcad0d21SAlexey Kardashevskiy { 3146fcad0d21SAlexey Kardashevskiy return true; 3147fcad0d21SAlexey Kardashevskiy } 3148fcad0d21SAlexey Kardashevskiy 314930f4b05bSDavid Gibson static char *spapr_get_resize_hpt(Object *obj, Error **errp) 315030f4b05bSDavid Gibson { 3151ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(obj); 315230f4b05bSDavid Gibson 315330f4b05bSDavid Gibson switch (spapr->resize_hpt) { 315430f4b05bSDavid Gibson case SPAPR_RESIZE_HPT_DEFAULT: 315530f4b05bSDavid Gibson return g_strdup("default"); 315630f4b05bSDavid Gibson case SPAPR_RESIZE_HPT_DISABLED: 315730f4b05bSDavid Gibson return g_strdup("disabled"); 315830f4b05bSDavid Gibson case SPAPR_RESIZE_HPT_ENABLED: 315930f4b05bSDavid Gibson return g_strdup("enabled"); 316030f4b05bSDavid Gibson case 
SPAPR_RESIZE_HPT_REQUIRED: 316130f4b05bSDavid Gibson return g_strdup("required"); 316230f4b05bSDavid Gibson } 316330f4b05bSDavid Gibson g_assert_not_reached(); 316430f4b05bSDavid Gibson } 316530f4b05bSDavid Gibson 316630f4b05bSDavid Gibson static void spapr_set_resize_hpt(Object *obj, const char *value, Error **errp) 316730f4b05bSDavid Gibson { 3168ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(obj); 316930f4b05bSDavid Gibson 317030f4b05bSDavid Gibson if (strcmp(value, "default") == 0) { 317130f4b05bSDavid Gibson spapr->resize_hpt = SPAPR_RESIZE_HPT_DEFAULT; 317230f4b05bSDavid Gibson } else if (strcmp(value, "disabled") == 0) { 317330f4b05bSDavid Gibson spapr->resize_hpt = SPAPR_RESIZE_HPT_DISABLED; 317430f4b05bSDavid Gibson } else if (strcmp(value, "enabled") == 0) { 317530f4b05bSDavid Gibson spapr->resize_hpt = SPAPR_RESIZE_HPT_ENABLED; 317630f4b05bSDavid Gibson } else if (strcmp(value, "required") == 0) { 317730f4b05bSDavid Gibson spapr->resize_hpt = SPAPR_RESIZE_HPT_REQUIRED; 317830f4b05bSDavid Gibson } else { 317930f4b05bSDavid Gibson error_setg(errp, "Bad value for \"resize-hpt\" property"); 318030f4b05bSDavid Gibson } 318130f4b05bSDavid Gibson } 318230f4b05bSDavid Gibson 31833ba3d0bcSCédric Le Goater static char *spapr_get_ic_mode(Object *obj, Error **errp) 31843ba3d0bcSCédric Le Goater { 3185ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(obj); 31863ba3d0bcSCédric Le Goater 31873ba3d0bcSCédric Le Goater if (spapr->irq == &spapr_irq_xics_legacy) { 31883ba3d0bcSCédric Le Goater return g_strdup("legacy"); 31893ba3d0bcSCédric Le Goater } else if (spapr->irq == &spapr_irq_xics) { 31903ba3d0bcSCédric Le Goater return g_strdup("xics"); 31913ba3d0bcSCédric Le Goater } else if (spapr->irq == &spapr_irq_xive) { 31923ba3d0bcSCédric Le Goater return g_strdup("xive"); 319313db0cd9SCédric Le Goater } else if (spapr->irq == &spapr_irq_dual) { 319413db0cd9SCédric Le Goater return g_strdup("dual"); 31953ba3d0bcSCédric Le Goater } 
31963ba3d0bcSCédric Le Goater g_assert_not_reached(); 31973ba3d0bcSCédric Le Goater } 31983ba3d0bcSCédric Le Goater 31993ba3d0bcSCédric Le Goater static void spapr_set_ic_mode(Object *obj, const char *value, Error **errp) 32003ba3d0bcSCédric Le Goater { 3201ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(obj); 32023ba3d0bcSCédric Le Goater 320321df5e4fSGreg Kurz if (SPAPR_MACHINE_GET_CLASS(spapr)->legacy_irq_allocation) { 320421df5e4fSGreg Kurz error_setg(errp, "This machine only uses the legacy XICS backend, don't pass ic-mode"); 320521df5e4fSGreg Kurz return; 320621df5e4fSGreg Kurz } 320721df5e4fSGreg Kurz 32083ba3d0bcSCédric Le Goater /* The legacy IRQ backend can not be set */ 32093ba3d0bcSCédric Le Goater if (strcmp(value, "xics") == 0) { 32103ba3d0bcSCédric Le Goater spapr->irq = &spapr_irq_xics; 32113ba3d0bcSCédric Le Goater } else if (strcmp(value, "xive") == 0) { 32123ba3d0bcSCédric Le Goater spapr->irq = &spapr_irq_xive; 321313db0cd9SCédric Le Goater } else if (strcmp(value, "dual") == 0) { 321413db0cd9SCédric Le Goater spapr->irq = &spapr_irq_dual; 32153ba3d0bcSCédric Le Goater } else { 32163ba3d0bcSCédric Le Goater error_setg(errp, "Bad value for \"ic-mode\" property"); 32173ba3d0bcSCédric Le Goater } 32183ba3d0bcSCédric Le Goater } 32193ba3d0bcSCédric Le Goater 322027461d69SPrasad J Pandit static char *spapr_get_host_model(Object *obj, Error **errp) 322127461d69SPrasad J Pandit { 3222ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(obj); 322327461d69SPrasad J Pandit 322427461d69SPrasad J Pandit return g_strdup(spapr->host_model); 322527461d69SPrasad J Pandit } 322627461d69SPrasad J Pandit 322727461d69SPrasad J Pandit static void spapr_set_host_model(Object *obj, const char *value, Error **errp) 322827461d69SPrasad J Pandit { 3229ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(obj); 323027461d69SPrasad J Pandit 323127461d69SPrasad J Pandit g_free(spapr->host_model); 323227461d69SPrasad J Pandit 
spapr->host_model = g_strdup(value); 323327461d69SPrasad J Pandit } 323427461d69SPrasad J Pandit 323527461d69SPrasad J Pandit static char *spapr_get_host_serial(Object *obj, Error **errp) 323627461d69SPrasad J Pandit { 3237ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(obj); 323827461d69SPrasad J Pandit 323927461d69SPrasad J Pandit return g_strdup(spapr->host_serial); 324027461d69SPrasad J Pandit } 324127461d69SPrasad J Pandit 324227461d69SPrasad J Pandit static void spapr_set_host_serial(Object *obj, const char *value, Error **errp) 324327461d69SPrasad J Pandit { 3244ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(obj); 324527461d69SPrasad J Pandit 324627461d69SPrasad J Pandit g_free(spapr->host_serial); 324727461d69SPrasad J Pandit spapr->host_serial = g_strdup(value); 324827461d69SPrasad J Pandit } 324927461d69SPrasad J Pandit 3250bcb5ce08SDavid Gibson static void spapr_instance_init(Object *obj) 325123825581SEduardo Habkost { 3252ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(obj); 3253ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); 3254715c5407SDavid Gibson 3255715c5407SDavid Gibson spapr->htab_fd = -1; 3256f6229214SMichael Roth spapr->use_hotplug_event_source = true; 325723825581SEduardo Habkost object_property_add_str(obj, "kvm-type", 3258d2623129SMarkus Armbruster spapr_get_kvm_type, spapr_set_kvm_type); 325949d2e648SMarcel Apfelbaum object_property_set_description(obj, "kvm-type", 32607eecec7dSMarkus Armbruster "Specifies the KVM virtualization mode (HV, PR)"); 3261f6229214SMichael Roth object_property_add_bool(obj, "modern-hotplug-events", 3262f6229214SMichael Roth spapr_get_modern_hotplug_events, 3263d2623129SMarkus Armbruster spapr_set_modern_hotplug_events); 3264f6229214SMichael Roth object_property_set_description(obj, "modern-hotplug-events", 3265f6229214SMichael Roth "Use dedicated hotplug event mechanism in" 3266f6229214SMichael Roth " place of standard EPOW events when 
possible" 32677eecec7dSMarkus Armbruster " (required for memory hot-unplug support)"); 32687843c0d6SDavid Gibson ppc_compat_add_property(obj, "max-cpu-compat", &spapr->max_compat_pvr, 326940c2281cSMarkus Armbruster "Maximum permitted CPU compatibility mode"); 327030f4b05bSDavid Gibson 327130f4b05bSDavid Gibson object_property_add_str(obj, "resize-hpt", 3272d2623129SMarkus Armbruster spapr_get_resize_hpt, spapr_set_resize_hpt); 327330f4b05bSDavid Gibson object_property_set_description(obj, "resize-hpt", 32747eecec7dSMarkus Armbruster "Resizing of the Hash Page Table (enabled, disabled, required)"); 327564a7b8deSFelipe Franciosi object_property_add_uint32_ptr(obj, "vsmt", 3276d2623129SMarkus Armbruster &spapr->vsmt, OBJ_PROP_FLAG_READWRITE); 3277fa98fbfcSSam Bobroff object_property_set_description(obj, "vsmt", 3278fa98fbfcSSam Bobroff "Virtual SMT: KVM behaves as if this were" 32797eecec7dSMarkus Armbruster " the host's SMT mode"); 328064a7b8deSFelipe Franciosi 3281fcad0d21SAlexey Kardashevskiy object_property_add_bool(obj, "vfio-no-msix-emulation", 3282d2623129SMarkus Armbruster spapr_get_msix_emulation, NULL); 32833ba3d0bcSCédric Le Goater 328464a7b8deSFelipe Franciosi object_property_add_uint64_ptr(obj, "kernel-addr", 3285d2623129SMarkus Armbruster &spapr->kernel_addr, OBJ_PROP_FLAG_READWRITE); 328687262806SAlexey Kardashevskiy object_property_set_description(obj, "kernel-addr", 328787262806SAlexey Kardashevskiy stringify(KERNEL_LOAD_ADDR) 32887eecec7dSMarkus Armbruster " for -kernel is the default"); 328987262806SAlexey Kardashevskiy spapr->kernel_addr = KERNEL_LOAD_ADDR; 32903ba3d0bcSCédric Le Goater /* The machine class defines the default interrupt controller mode */ 32913ba3d0bcSCédric Le Goater spapr->irq = smc->irq; 32923ba3d0bcSCédric Le Goater object_property_add_str(obj, "ic-mode", spapr_get_ic_mode, 3293d2623129SMarkus Armbruster spapr_set_ic_mode); 32943ba3d0bcSCédric Le Goater object_property_set_description(obj, "ic-mode", 32957eecec7dSMarkus 
Armbruster "Specifies the interrupt controller mode (xics, xive, dual)"); 329627461d69SPrasad J Pandit 329727461d69SPrasad J Pandit object_property_add_str(obj, "host-model", 3298d2623129SMarkus Armbruster spapr_get_host_model, spapr_set_host_model); 329927461d69SPrasad J Pandit object_property_set_description(obj, "host-model", 33007eecec7dSMarkus Armbruster "Host model to advertise in guest device tree"); 330127461d69SPrasad J Pandit object_property_add_str(obj, "host-serial", 3302d2623129SMarkus Armbruster spapr_get_host_serial, spapr_set_host_serial); 330327461d69SPrasad J Pandit object_property_set_description(obj, "host-serial", 33047eecec7dSMarkus Armbruster "Host serial number to advertise in guest device tree"); 330523825581SEduardo Habkost } 330623825581SEduardo Habkost 330787bbdd9cSDavid Gibson static void spapr_machine_finalizefn(Object *obj) 330887bbdd9cSDavid Gibson { 3309ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(obj); 331087bbdd9cSDavid Gibson 331187bbdd9cSDavid Gibson g_free(spapr->kvm_type); 331287bbdd9cSDavid Gibson } 331387bbdd9cSDavid Gibson 33141c7ad77eSNicholas Piggin void spapr_do_system_reset_on_cpu(CPUState *cs, run_on_cpu_data arg) 331534316482SAlexey Kardashevskiy { 33160e236d34SNicholas Piggin SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine()); 3317b5b7f391SNicholas Piggin PowerPCCPU *cpu = POWERPC_CPU(cs); 3318b5b7f391SNicholas Piggin CPUPPCState *env = &cpu->env; 33190e236d34SNicholas Piggin 332034316482SAlexey Kardashevskiy cpu_synchronize_state(cs); 33210e236d34SNicholas Piggin /* If FWNMI is inactive, addr will be -1, which will deliver to 0x100 */ 33220e236d34SNicholas Piggin if (spapr->fwnmi_system_reset_addr != -1) { 33230e236d34SNicholas Piggin uint64_t rtas_addr, addr; 33240e236d34SNicholas Piggin 33250e236d34SNicholas Piggin /* get rtas addr from fdt */ 33260e236d34SNicholas Piggin rtas_addr = spapr_get_rtas_addr(); 33270e236d34SNicholas Piggin if (!rtas_addr) { 33280e236d34SNicholas Piggin 
qemu_system_guest_panicked(NULL); 33290e236d34SNicholas Piggin return; 33300e236d34SNicholas Piggin } 33310e236d34SNicholas Piggin 33320e236d34SNicholas Piggin addr = rtas_addr + RTAS_ERROR_LOG_MAX + cs->cpu_index * sizeof(uint64_t)*2; 33330e236d34SNicholas Piggin stq_be_phys(&address_space_memory, addr, env->gpr[3]); 33340e236d34SNicholas Piggin stq_be_phys(&address_space_memory, addr + sizeof(uint64_t), 0); 33350e236d34SNicholas Piggin env->gpr[3] = addr; 33360e236d34SNicholas Piggin } 3337b5b7f391SNicholas Piggin ppc_cpu_do_system_reset(cs); 3338b5b7f391SNicholas Piggin if (spapr->fwnmi_system_reset_addr != -1) { 3339b5b7f391SNicholas Piggin env->nip = spapr->fwnmi_system_reset_addr; 3340b5b7f391SNicholas Piggin } 334134316482SAlexey Kardashevskiy } 334234316482SAlexey Kardashevskiy 334334316482SAlexey Kardashevskiy static void spapr_nmi(NMIState *n, int cpu_index, Error **errp) 334434316482SAlexey Kardashevskiy { 334534316482SAlexey Kardashevskiy CPUState *cs; 334634316482SAlexey Kardashevskiy 334734316482SAlexey Kardashevskiy CPU_FOREACH(cs) { 33481c7ad77eSNicholas Piggin async_run_on_cpu(cs, spapr_do_system_reset_on_cpu, RUN_ON_CPU_NULL); 334934316482SAlexey Kardashevskiy } 335034316482SAlexey Kardashevskiy } 335134316482SAlexey Kardashevskiy 3352ce2918cbSDavid Gibson int spapr_lmb_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr, 335362d38c9bSGreg Kurz void *fdt, int *fdt_start_offset, Error **errp) 335462d38c9bSGreg Kurz { 335562d38c9bSGreg Kurz uint64_t addr; 335662d38c9bSGreg Kurz uint32_t node; 335762d38c9bSGreg Kurz 335862d38c9bSGreg Kurz addr = spapr_drc_index(drc) * SPAPR_MEMORY_BLOCK_SIZE; 335962d38c9bSGreg Kurz node = object_property_get_uint(OBJECT(drc->dev), PC_DIMM_NODE_PROP, 336062d38c9bSGreg Kurz &error_abort); 3361f1aa45ffSDaniel Henrique Barboza *fdt_start_offset = spapr_dt_memory_node(spapr, fdt, node, addr, 336262d38c9bSGreg Kurz SPAPR_MEMORY_BLOCK_SIZE); 336362d38c9bSGreg Kurz return 0; 336462d38c9bSGreg Kurz } 336562d38c9bSGreg Kurz 
336679b78a6bSMichael Roth static void spapr_add_lmbs(DeviceState *dev, uint64_t addr_start, uint64_t size, 336762d38c9bSGreg Kurz bool dedicated_hp_event_source, Error **errp) 3368c20d332aSBharata B Rao { 3369ce2918cbSDavid Gibson SpaprDrc *drc; 3370c20d332aSBharata B Rao uint32_t nr_lmbs = size/SPAPR_MEMORY_BLOCK_SIZE; 337162d38c9bSGreg Kurz int i; 337279b78a6bSMichael Roth uint64_t addr = addr_start; 337394fd9cbaSLaurent Vivier bool hotplugged = spapr_drc_hotplugged(dev); 3374160bb678SGreg Kurz Error *local_err = NULL; 3375c20d332aSBharata B Rao 3376c20d332aSBharata B Rao for (i = 0; i < nr_lmbs; i++) { 3377fbf55397SDavid Gibson drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, 3378c20d332aSBharata B Rao addr / SPAPR_MEMORY_BLOCK_SIZE); 3379c20d332aSBharata B Rao g_assert(drc); 3380c20d332aSBharata B Rao 338109d876ceSGreg Kurz spapr_drc_attach(drc, dev, &local_err); 3382160bb678SGreg Kurz if (local_err) { 3383160bb678SGreg Kurz while (addr > addr_start) { 3384160bb678SGreg Kurz addr -= SPAPR_MEMORY_BLOCK_SIZE; 3385160bb678SGreg Kurz drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, 3386160bb678SGreg Kurz addr / SPAPR_MEMORY_BLOCK_SIZE); 3387a8dc47fdSDavid Gibson spapr_drc_detach(drc); 3388160bb678SGreg Kurz } 3389160bb678SGreg Kurz error_propagate(errp, local_err); 3390160bb678SGreg Kurz return; 3391160bb678SGreg Kurz } 339294fd9cbaSLaurent Vivier if (!hotplugged) { 339394fd9cbaSLaurent Vivier spapr_drc_reset(drc); 339494fd9cbaSLaurent Vivier } 3395c20d332aSBharata B Rao addr += SPAPR_MEMORY_BLOCK_SIZE; 3396c20d332aSBharata B Rao } 33975dd5238cSJianjun Duan /* send hotplug notification to the 33985dd5238cSJianjun Duan * guest only in case of hotplugged memory 33995dd5238cSJianjun Duan */ 340094fd9cbaSLaurent Vivier if (hotplugged) { 340179b78a6bSMichael Roth if (dedicated_hp_event_source) { 3402fbf55397SDavid Gibson drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, 340379b78a6bSMichael Roth addr_start / SPAPR_MEMORY_BLOCK_SIZE); 340479b78a6bSMichael Roth 
spapr_hotplug_req_add_by_count_indexed(SPAPR_DR_CONNECTOR_TYPE_LMB, 340579b78a6bSMichael Roth nr_lmbs, 34060b55aa91SDavid Gibson spapr_drc_index(drc)); 340779b78a6bSMichael Roth } else { 340879b78a6bSMichael Roth spapr_hotplug_req_add_by_count(SPAPR_DR_CONNECTOR_TYPE_LMB, 340979b78a6bSMichael Roth nr_lmbs); 341079b78a6bSMichael Roth } 3411c20d332aSBharata B Rao } 34125dd5238cSJianjun Duan } 3413c20d332aSBharata B Rao 3414c20d332aSBharata B Rao static void spapr_memory_plug(HotplugHandler *hotplug_dev, DeviceState *dev, 341581985f3bSDavid Hildenbrand Error **errp) 3416c20d332aSBharata B Rao { 3417c20d332aSBharata B Rao Error *local_err = NULL; 3418ce2918cbSDavid Gibson SpaprMachineState *ms = SPAPR_MACHINE(hotplug_dev); 3419c20d332aSBharata B Rao PCDIMMDevice *dimm = PC_DIMM(dev); 3420ee3a71e3SShivaprasad G Bhat uint64_t size, addr, slot; 3421ee3a71e3SShivaprasad G Bhat bool is_nvdimm = object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM); 342204790978SThomas Huth 3423946d6154SDavid Hildenbrand size = memory_device_get_region_size(MEMORY_DEVICE(dev), &error_abort); 3424df587133SThomas Huth 3425fd3416f5SDavid Hildenbrand pc_dimm_plug(dimm, MACHINE(ms), &local_err); 3426c20d332aSBharata B Rao if (local_err) { 3427c20d332aSBharata B Rao goto out; 3428c20d332aSBharata B Rao } 3429c20d332aSBharata B Rao 3430ee3a71e3SShivaprasad G Bhat if (!is_nvdimm) { 34319ed442b8SMarc-André Lureau addr = object_property_get_uint(OBJECT(dimm), 34329ed442b8SMarc-André Lureau PC_DIMM_ADDR_PROP, &local_err); 3433c20d332aSBharata B Rao if (local_err) { 3434160bb678SGreg Kurz goto out_unplug; 3435c20d332aSBharata B Rao } 3436ee3a71e3SShivaprasad G Bhat spapr_add_lmbs(dev, addr, size, 3437ee3a71e3SShivaprasad G Bhat spapr_ovec_test(ms->ov5_cas, OV5_HP_EVT), 3438160bb678SGreg Kurz &local_err); 3439ee3a71e3SShivaprasad G Bhat } else { 3440ee3a71e3SShivaprasad G Bhat slot = object_property_get_uint(OBJECT(dimm), 3441ee3a71e3SShivaprasad G Bhat PC_DIMM_SLOT_PROP, &local_err); 3442ee3a71e3SShivaprasad G 
Bhat if (local_err) { 3443ee3a71e3SShivaprasad G Bhat goto out_unplug; 3444ee3a71e3SShivaprasad G Bhat } 3445ee3a71e3SShivaprasad G Bhat spapr_add_nvdimm(dev, slot, &local_err); 3446ee3a71e3SShivaprasad G Bhat } 3447ee3a71e3SShivaprasad G Bhat 3448160bb678SGreg Kurz if (local_err) { 3449160bb678SGreg Kurz goto out_unplug; 3450160bb678SGreg Kurz } 3451c20d332aSBharata B Rao 3452160bb678SGreg Kurz return; 3453160bb678SGreg Kurz 3454160bb678SGreg Kurz out_unplug: 3455fd3416f5SDavid Hildenbrand pc_dimm_unplug(dimm, MACHINE(ms)); 3456c20d332aSBharata B Rao out: 3457c20d332aSBharata B Rao error_propagate(errp, local_err); 3458c20d332aSBharata B Rao } 3459c20d332aSBharata B Rao 3460c871bc70SLaurent Vivier static void spapr_memory_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev, 3461c871bc70SLaurent Vivier Error **errp) 3462c871bc70SLaurent Vivier { 3463ce2918cbSDavid Gibson const SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(hotplug_dev); 3464ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_dev); 3465ee3a71e3SShivaprasad G Bhat bool is_nvdimm = object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM); 3466c871bc70SLaurent Vivier PCDIMMDevice *dimm = PC_DIMM(dev); 34678f1ffe5bSDavid Hildenbrand Error *local_err = NULL; 346804790978SThomas Huth uint64_t size; 3469123eec65SDavid Gibson Object *memdev; 3470123eec65SDavid Gibson hwaddr pagesize; 3471c871bc70SLaurent Vivier 34724e8a01bdSDavid Hildenbrand if (!smc->dr_lmb_enabled) { 34734e8a01bdSDavid Hildenbrand error_setg(errp, "Memory hotplug not supported for this machine"); 34744e8a01bdSDavid Hildenbrand return; 34754e8a01bdSDavid Hildenbrand } 34764e8a01bdSDavid Hildenbrand 3477946d6154SDavid Hildenbrand size = memory_device_get_region_size(MEMORY_DEVICE(dimm), &local_err); 3478946d6154SDavid Hildenbrand if (local_err) { 3479946d6154SDavid Hildenbrand error_propagate(errp, local_err); 348004790978SThomas Huth return; 348104790978SThomas Huth } 348204790978SThomas Huth 3483beb6073fSDaniel Henrique 
Barboza if (is_nvdimm) { 3484beb6073fSDaniel Henrique Barboza spapr_nvdimm_validate(hotplug_dev, NVDIMM(dev), size, &local_err); 3485ee3a71e3SShivaprasad G Bhat if (local_err) { 3486ee3a71e3SShivaprasad G Bhat error_propagate(errp, local_err); 3487ee3a71e3SShivaprasad G Bhat return; 3488ee3a71e3SShivaprasad G Bhat } 3489beb6073fSDaniel Henrique Barboza } else if (size % SPAPR_MEMORY_BLOCK_SIZE) { 3490beb6073fSDaniel Henrique Barboza error_setg(errp, "Hotplugged memory size must be a multiple of " 3491beb6073fSDaniel Henrique Barboza "%" PRIu64 " MB", SPAPR_MEMORY_BLOCK_SIZE / MiB); 3492beb6073fSDaniel Henrique Barboza return; 3493c871bc70SLaurent Vivier } 3494c871bc70SLaurent Vivier 3495123eec65SDavid Gibson memdev = object_property_get_link(OBJECT(dimm), PC_DIMM_MEMDEV_PROP, 3496123eec65SDavid Gibson &error_abort); 3497123eec65SDavid Gibson pagesize = host_memory_backend_pagesize(MEMORY_BACKEND(memdev)); 34988f1ffe5bSDavid Hildenbrand spapr_check_pagesize(spapr, pagesize, &local_err); 34998f1ffe5bSDavid Hildenbrand if (local_err) { 35008f1ffe5bSDavid Hildenbrand error_propagate(errp, local_err); 35018f1ffe5bSDavid Hildenbrand return; 35028f1ffe5bSDavid Hildenbrand } 35038f1ffe5bSDavid Hildenbrand 3504fd3416f5SDavid Hildenbrand pc_dimm_pre_plug(dimm, MACHINE(hotplug_dev), NULL, errp); 3505c871bc70SLaurent Vivier } 3506c871bc70SLaurent Vivier 3507ce2918cbSDavid Gibson struct SpaprDimmState { 35080cffce56SDavid Gibson PCDIMMDevice *dimm; 3509cf632463SBharata B Rao uint32_t nr_lmbs; 3510ce2918cbSDavid Gibson QTAILQ_ENTRY(SpaprDimmState) next; 35110cffce56SDavid Gibson }; 35120cffce56SDavid Gibson 3513ce2918cbSDavid Gibson static SpaprDimmState *spapr_pending_dimm_unplugs_find(SpaprMachineState *s, 35140cffce56SDavid Gibson PCDIMMDevice *dimm) 35150cffce56SDavid Gibson { 3516ce2918cbSDavid Gibson SpaprDimmState *dimm_state = NULL; 35170cffce56SDavid Gibson 35180cffce56SDavid Gibson QTAILQ_FOREACH(dimm_state, &s->pending_dimm_unplugs, next) { 35190cffce56SDavid Gibson 
if (dimm_state->dimm == dimm) { 35200cffce56SDavid Gibson break; 35210cffce56SDavid Gibson } 35220cffce56SDavid Gibson } 35230cffce56SDavid Gibson return dimm_state; 35240cffce56SDavid Gibson } 35250cffce56SDavid Gibson 3526ce2918cbSDavid Gibson static SpaprDimmState *spapr_pending_dimm_unplugs_add(SpaprMachineState *spapr, 35278d5981c4SBharata B Rao uint32_t nr_lmbs, 35288d5981c4SBharata B Rao PCDIMMDevice *dimm) 35290cffce56SDavid Gibson { 3530ce2918cbSDavid Gibson SpaprDimmState *ds = NULL; 35318d5981c4SBharata B Rao 35328d5981c4SBharata B Rao /* 35338d5981c4SBharata B Rao * If this request is for a DIMM whose removal had failed earlier 35348d5981c4SBharata B Rao * (due to guest's refusal to remove the LMBs), we would have this 35358d5981c4SBharata B Rao * dimm already in the pending_dimm_unplugs list. In that 35368d5981c4SBharata B Rao * case don't add again. 35378d5981c4SBharata B Rao */ 35388d5981c4SBharata B Rao ds = spapr_pending_dimm_unplugs_find(spapr, dimm); 35398d5981c4SBharata B Rao if (!ds) { 3540ce2918cbSDavid Gibson ds = g_malloc0(sizeof(SpaprDimmState)); 35418d5981c4SBharata B Rao ds->nr_lmbs = nr_lmbs; 35428d5981c4SBharata B Rao ds->dimm = dimm; 35438d5981c4SBharata B Rao QTAILQ_INSERT_HEAD(&spapr->pending_dimm_unplugs, ds, next); 35448d5981c4SBharata B Rao } 35458d5981c4SBharata B Rao return ds; 35460cffce56SDavid Gibson } 35470cffce56SDavid Gibson 3548ce2918cbSDavid Gibson static void spapr_pending_dimm_unplugs_remove(SpaprMachineState *spapr, 3549ce2918cbSDavid Gibson SpaprDimmState *dimm_state) 35500cffce56SDavid Gibson { 35510cffce56SDavid Gibson QTAILQ_REMOVE(&spapr->pending_dimm_unplugs, dimm_state, next); 35520cffce56SDavid Gibson g_free(dimm_state); 35530cffce56SDavid Gibson } 3554cf632463SBharata B Rao 3555ce2918cbSDavid Gibson static SpaprDimmState *spapr_recover_pending_dimm_state(SpaprMachineState *ms, 355616ee9980SDaniel Henrique Barboza PCDIMMDevice *dimm) 355716ee9980SDaniel Henrique Barboza { 3558ce2918cbSDavid Gibson SpaprDrc 
*drc; 3559946d6154SDavid Hildenbrand uint64_t size = memory_device_get_region_size(MEMORY_DEVICE(dimm), 3560946d6154SDavid Hildenbrand &error_abort); 356116ee9980SDaniel Henrique Barboza uint32_t nr_lmbs = size / SPAPR_MEMORY_BLOCK_SIZE; 356216ee9980SDaniel Henrique Barboza uint32_t avail_lmbs = 0; 356316ee9980SDaniel Henrique Barboza uint64_t addr_start, addr; 356416ee9980SDaniel Henrique Barboza int i; 356516ee9980SDaniel Henrique Barboza 356616ee9980SDaniel Henrique Barboza addr_start = object_property_get_int(OBJECT(dimm), PC_DIMM_ADDR_PROP, 356716ee9980SDaniel Henrique Barboza &error_abort); 356816ee9980SDaniel Henrique Barboza 356916ee9980SDaniel Henrique Barboza addr = addr_start; 357016ee9980SDaniel Henrique Barboza for (i = 0; i < nr_lmbs; i++) { 3571fbf55397SDavid Gibson drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, 357216ee9980SDaniel Henrique Barboza addr / SPAPR_MEMORY_BLOCK_SIZE); 357316ee9980SDaniel Henrique Barboza g_assert(drc); 3574454b580aSDavid Gibson if (drc->dev) { 357516ee9980SDaniel Henrique Barboza avail_lmbs++; 357616ee9980SDaniel Henrique Barboza } 357716ee9980SDaniel Henrique Barboza addr += SPAPR_MEMORY_BLOCK_SIZE; 357816ee9980SDaniel Henrique Barboza } 357916ee9980SDaniel Henrique Barboza 35808d5981c4SBharata B Rao return spapr_pending_dimm_unplugs_add(ms, avail_lmbs, dimm); 358116ee9980SDaniel Henrique Barboza } 358216ee9980SDaniel Henrique Barboza 358331834723SDaniel Henrique Barboza /* Callback to be called during DRC release. 
*/ 358431834723SDaniel Henrique Barboza void spapr_lmb_release(DeviceState *dev) 3585cf632463SBharata B Rao { 35863ec71474SDavid Hildenbrand HotplugHandler *hotplug_ctrl = qdev_get_hotplug_handler(dev); 3587ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_ctrl); 3588ce2918cbSDavid Gibson SpaprDimmState *ds = spapr_pending_dimm_unplugs_find(spapr, PC_DIMM(dev)); 3589cf632463SBharata B Rao 359016ee9980SDaniel Henrique Barboza /* This information will get lost if a migration occurs 359116ee9980SDaniel Henrique Barboza * during the unplug process. In this case recover it. */ 359216ee9980SDaniel Henrique Barboza if (ds == NULL) { 359316ee9980SDaniel Henrique Barboza ds = spapr_recover_pending_dimm_state(spapr, PC_DIMM(dev)); 35948d5981c4SBharata B Rao g_assert(ds); 3595454b580aSDavid Gibson /* The DRC being examined by the caller at least must be counted */ 3596454b580aSDavid Gibson g_assert(ds->nr_lmbs); 359716ee9980SDaniel Henrique Barboza } 3598454b580aSDavid Gibson 3599454b580aSDavid Gibson if (--ds->nr_lmbs) { 3600cf632463SBharata B Rao return; 3601cf632463SBharata B Rao } 3602cf632463SBharata B Rao 3603cf632463SBharata B Rao /* 3604cf632463SBharata B Rao * Now that all the LMBs have been removed by the guest, call the 36053ec71474SDavid Hildenbrand * unplug handler chain. This can never fail. 
3606cf632463SBharata B Rao */ 36073ec71474SDavid Hildenbrand hotplug_handler_unplug(hotplug_ctrl, dev, &error_abort); 360807578b0aSDavid Hildenbrand object_unparent(OBJECT(dev)); 36093ec71474SDavid Hildenbrand } 36103ec71474SDavid Hildenbrand 36113ec71474SDavid Hildenbrand static void spapr_memory_unplug(HotplugHandler *hotplug_dev, DeviceState *dev) 36123ec71474SDavid Hildenbrand { 3613ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_dev); 3614ce2918cbSDavid Gibson SpaprDimmState *ds = spapr_pending_dimm_unplugs_find(spapr, PC_DIMM(dev)); 36153ec71474SDavid Hildenbrand 3616fd3416f5SDavid Hildenbrand pc_dimm_unplug(PC_DIMM(dev), MACHINE(hotplug_dev)); 3617981c3dcdSMarkus Armbruster qdev_unrealize(dev); 36182a129767SDaniel Henrique Barboza spapr_pending_dimm_unplugs_remove(spapr, ds); 3619cf632463SBharata B Rao } 3620cf632463SBharata B Rao 3621cf632463SBharata B Rao static void spapr_memory_unplug_request(HotplugHandler *hotplug_dev, 3622cf632463SBharata B Rao DeviceState *dev, Error **errp) 3623cf632463SBharata B Rao { 3624ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_dev); 3625cf632463SBharata B Rao Error *local_err = NULL; 3626cf632463SBharata B Rao PCDIMMDevice *dimm = PC_DIMM(dev); 362704790978SThomas Huth uint32_t nr_lmbs; 362804790978SThomas Huth uint64_t size, addr_start, addr; 36290cffce56SDavid Gibson int i; 3630ce2918cbSDavid Gibson SpaprDrc *drc; 363104790978SThomas Huth 3632ee3a71e3SShivaprasad G Bhat if (object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM)) { 3633dcfe4805SMarkus Armbruster error_setg(errp, "nvdimm device hot unplug is not supported yet."); 3634dcfe4805SMarkus Armbruster return; 3635ee3a71e3SShivaprasad G Bhat } 3636ee3a71e3SShivaprasad G Bhat 3637946d6154SDavid Hildenbrand size = memory_device_get_region_size(MEMORY_DEVICE(dimm), &error_abort); 363804790978SThomas Huth nr_lmbs = size / SPAPR_MEMORY_BLOCK_SIZE; 363904790978SThomas Huth 36409ed442b8SMarc-André Lureau addr_start = 
object_property_get_uint(OBJECT(dimm), PC_DIMM_ADDR_PROP, 36410cffce56SDavid Gibson &local_err); 3642cf632463SBharata B Rao if (local_err) { 3643dcfe4805SMarkus Armbruster error_propagate(errp, local_err); 3644dcfe4805SMarkus Armbruster return; 3645cf632463SBharata B Rao } 3646cf632463SBharata B Rao 36472a129767SDaniel Henrique Barboza /* 36482a129767SDaniel Henrique Barboza * An existing pending dimm state for this DIMM means that there is an 36492a129767SDaniel Henrique Barboza * unplug operation in progress, waiting for the spapr_lmb_release 36502a129767SDaniel Henrique Barboza * callback to complete the job (BQL can't cover that far). In this case, 36512a129767SDaniel Henrique Barboza * bail out to avoid detaching DRCs that were already released. 36522a129767SDaniel Henrique Barboza */ 36532a129767SDaniel Henrique Barboza if (spapr_pending_dimm_unplugs_find(spapr, dimm)) { 3654dcfe4805SMarkus Armbruster error_setg(errp, "Memory unplug already in progress for device %s", 36552a129767SDaniel Henrique Barboza dev->id); 3656dcfe4805SMarkus Armbruster return; 36572a129767SDaniel Henrique Barboza } 36582a129767SDaniel Henrique Barboza 36598d5981c4SBharata B Rao spapr_pending_dimm_unplugs_add(spapr, nr_lmbs, dimm); 36600cffce56SDavid Gibson 36610cffce56SDavid Gibson addr = addr_start; 36620cffce56SDavid Gibson for (i = 0; i < nr_lmbs; i++) { 3663fbf55397SDavid Gibson drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, 36640cffce56SDavid Gibson addr / SPAPR_MEMORY_BLOCK_SIZE); 36650cffce56SDavid Gibson g_assert(drc); 36660cffce56SDavid Gibson 3667a8dc47fdSDavid Gibson spapr_drc_detach(drc); 36680cffce56SDavid Gibson addr += SPAPR_MEMORY_BLOCK_SIZE; 36690cffce56SDavid Gibson } 36700cffce56SDavid Gibson 3671fbf55397SDavid Gibson drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, 36720cffce56SDavid Gibson addr_start / SPAPR_MEMORY_BLOCK_SIZE); 36730cffce56SDavid Gibson spapr_hotplug_req_remove_by_count_indexed(SPAPR_DR_CONNECTOR_TYPE_LMB, 36740b55aa91SDavid Gibson nr_lmbs, 
spapr_drc_index(drc)); 3675cf632463SBharata B Rao } 3676cf632463SBharata B Rao 3677765d1bddSDavid Gibson /* Callback to be called during DRC release. */ 3678765d1bddSDavid Gibson void spapr_core_release(DeviceState *dev) 3679ff9006ddSIgor Mammedov { 3680a4261be1SDavid Hildenbrand HotplugHandler *hotplug_ctrl = qdev_get_hotplug_handler(dev); 3681a4261be1SDavid Hildenbrand 3682a4261be1SDavid Hildenbrand /* Call the unplug handler chain. This can never fail. */ 3683a4261be1SDavid Hildenbrand hotplug_handler_unplug(hotplug_ctrl, dev, &error_abort); 368407578b0aSDavid Hildenbrand object_unparent(OBJECT(dev)); 3685a4261be1SDavid Hildenbrand } 3686a4261be1SDavid Hildenbrand 3687a4261be1SDavid Hildenbrand static void spapr_core_unplug(HotplugHandler *hotplug_dev, DeviceState *dev) 3688a4261be1SDavid Hildenbrand { 3689a4261be1SDavid Hildenbrand MachineState *ms = MACHINE(hotplug_dev); 3690ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(ms); 3691ff9006ddSIgor Mammedov CPUCore *cc = CPU_CORE(dev); 3692535455fdSIgor Mammedov CPUArchId *core_slot = spapr_find_cpu_slot(ms, cc->core_id, NULL); 3693ff9006ddSIgor Mammedov 369446f7afa3SGreg Kurz if (smc->pre_2_10_has_unused_icps) { 3695ce2918cbSDavid Gibson SpaprCpuCore *sc = SPAPR_CPU_CORE(OBJECT(dev)); 369646f7afa3SGreg Kurz int i; 369746f7afa3SGreg Kurz 369846f7afa3SGreg Kurz for (i = 0; i < cc->nr_threads; i++) { 369994ad93bdSGreg Kurz CPUState *cs = CPU(sc->threads[i]); 370046f7afa3SGreg Kurz 370146f7afa3SGreg Kurz pre_2_10_vmstate_register_dummy_icp(cs->cpu_index); 370246f7afa3SGreg Kurz } 370346f7afa3SGreg Kurz } 370446f7afa3SGreg Kurz 370507572c06SGreg Kurz assert(core_slot); 3706535455fdSIgor Mammedov core_slot->cpu = NULL; 3707981c3dcdSMarkus Armbruster qdev_unrealize(dev); 3708ff9006ddSIgor Mammedov } 3709ff9006ddSIgor Mammedov 3710115debf2SIgor Mammedov static 3711115debf2SIgor Mammedov void spapr_core_unplug_request(HotplugHandler *hotplug_dev, DeviceState *dev, 3712ff9006ddSIgor Mammedov Error 
**errp) 3713ff9006ddSIgor Mammedov { 3714ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev)); 3715535455fdSIgor Mammedov int index; 3716ce2918cbSDavid Gibson SpaprDrc *drc; 3717535455fdSIgor Mammedov CPUCore *cc = CPU_CORE(dev); 3718ff9006ddSIgor Mammedov 3719535455fdSIgor Mammedov if (!spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index)) { 3720535455fdSIgor Mammedov error_setg(errp, "Unable to find CPU core with core-id: %d", 3721535455fdSIgor Mammedov cc->core_id); 3722535455fdSIgor Mammedov return; 3723535455fdSIgor Mammedov } 3724ff9006ddSIgor Mammedov if (index == 0) { 3725ff9006ddSIgor Mammedov error_setg(errp, "Boot CPU core may not be unplugged"); 3726ff9006ddSIgor Mammedov return; 3727ff9006ddSIgor Mammedov } 3728ff9006ddSIgor Mammedov 37295d0fb150SGreg Kurz drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU, 37305d0fb150SGreg Kurz spapr_vcpu_id(spapr, cc->core_id)); 3731ff9006ddSIgor Mammedov g_assert(drc); 3732ff9006ddSIgor Mammedov 373347c8c915SGreg Kurz if (!spapr_drc_unplug_requested(drc)) { 3734a8dc47fdSDavid Gibson spapr_drc_detach(drc); 3735ff9006ddSIgor Mammedov spapr_hotplug_req_remove_by_index(drc); 3736ff9006ddSIgor Mammedov } 373747c8c915SGreg Kurz } 3738ff9006ddSIgor Mammedov 3739ce2918cbSDavid Gibson int spapr_core_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr, 3740345b12b9SGreg Kurz void *fdt, int *fdt_start_offset, Error **errp) 3741345b12b9SGreg Kurz { 3742ce2918cbSDavid Gibson SpaprCpuCore *core = SPAPR_CPU_CORE(drc->dev); 3743345b12b9SGreg Kurz CPUState *cs = CPU(core->threads[0]); 3744345b12b9SGreg Kurz PowerPCCPU *cpu = POWERPC_CPU(cs); 3745345b12b9SGreg Kurz DeviceClass *dc = DEVICE_GET_CLASS(cs); 3746345b12b9SGreg Kurz int id = spapr_get_vcpu_id(cpu); 3747345b12b9SGreg Kurz char *nodename; 3748345b12b9SGreg Kurz int offset; 3749345b12b9SGreg Kurz 3750345b12b9SGreg Kurz nodename = g_strdup_printf("%s@%x", dc->fw_name, id); 3751345b12b9SGreg Kurz offset = fdt_add_subnode(fdt, 0, nodename); 
3752345b12b9SGreg Kurz g_free(nodename); 3753345b12b9SGreg Kurz 375491335a5eSDavid Gibson spapr_dt_cpu(cs, fdt, offset, spapr); 3755345b12b9SGreg Kurz 3756345b12b9SGreg Kurz *fdt_start_offset = offset; 3757345b12b9SGreg Kurz return 0; 3758345b12b9SGreg Kurz } 3759345b12b9SGreg Kurz 3760ff9006ddSIgor Mammedov static void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev, 3761ff9006ddSIgor Mammedov Error **errp) 3762ff9006ddSIgor Mammedov { 3763ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev)); 3764ff9006ddSIgor Mammedov MachineClass *mc = MACHINE_GET_CLASS(spapr); 3765ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 3766ce2918cbSDavid Gibson SpaprCpuCore *core = SPAPR_CPU_CORE(OBJECT(dev)); 3767ff9006ddSIgor Mammedov CPUCore *cc = CPU_CORE(dev); 3768345b12b9SGreg Kurz CPUState *cs; 3769ce2918cbSDavid Gibson SpaprDrc *drc; 3770ff9006ddSIgor Mammedov Error *local_err = NULL; 3771535455fdSIgor Mammedov CPUArchId *core_slot; 3772535455fdSIgor Mammedov int index; 377394fd9cbaSLaurent Vivier bool hotplugged = spapr_drc_hotplugged(dev); 3774b1e81567SGreg Kurz int i; 3775ff9006ddSIgor Mammedov 3776535455fdSIgor Mammedov core_slot = spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index); 3777535455fdSIgor Mammedov if (!core_slot) { 3778535455fdSIgor Mammedov error_setg(errp, "Unable to find CPU core with core-id: %d", 3779535455fdSIgor Mammedov cc->core_id); 3780535455fdSIgor Mammedov return; 3781535455fdSIgor Mammedov } 37825d0fb150SGreg Kurz drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU, 37835d0fb150SGreg Kurz spapr_vcpu_id(spapr, cc->core_id)); 3784ff9006ddSIgor Mammedov 3785c5514d0eSIgor Mammedov g_assert(drc || !mc->has_hotpluggable_cpus); 3786ff9006ddSIgor Mammedov 3787e49c63d5SGreg Kurz if (drc) { 378809d876ceSGreg Kurz spapr_drc_attach(drc, dev, &local_err); 3789ff9006ddSIgor Mammedov if (local_err) { 3790ff9006ddSIgor Mammedov error_propagate(errp, local_err); 3791ff9006ddSIgor Mammedov return; 
3792ff9006ddSIgor Mammedov } 3793ff9006ddSIgor Mammedov 379494fd9cbaSLaurent Vivier if (hotplugged) { 3795ff9006ddSIgor Mammedov /* 379694fd9cbaSLaurent Vivier * Send hotplug notification interrupt to the guest only 379794fd9cbaSLaurent Vivier * in case of hotplugged CPUs. 3798ff9006ddSIgor Mammedov */ 3799ff9006ddSIgor Mammedov spapr_hotplug_req_add_by_index(drc); 380094fd9cbaSLaurent Vivier } else { 380194fd9cbaSLaurent Vivier spapr_drc_reset(drc); 3802ff9006ddSIgor Mammedov } 380394fd9cbaSLaurent Vivier } 380494fd9cbaSLaurent Vivier 3805535455fdSIgor Mammedov core_slot->cpu = OBJECT(dev); 380646f7afa3SGreg Kurz 380746f7afa3SGreg Kurz if (smc->pre_2_10_has_unused_icps) { 380846f7afa3SGreg Kurz for (i = 0; i < cc->nr_threads; i++) { 3809bc877283SGreg Kurz cs = CPU(core->threads[i]); 381046f7afa3SGreg Kurz pre_2_10_vmstate_unregister_dummy_icp(cs->cpu_index); 381146f7afa3SGreg Kurz } 381246f7afa3SGreg Kurz } 3813b1e81567SGreg Kurz 3814b1e81567SGreg Kurz /* 3815b1e81567SGreg Kurz * Set compatibility mode to match the boot CPU, which was either set 3816b1e81567SGreg Kurz * by the machine reset code or by CAS. 
3817b1e81567SGreg Kurz */ 3818b1e81567SGreg Kurz if (hotplugged) { 3819b1e81567SGreg Kurz for (i = 0; i < cc->nr_threads; i++) { 3820*a3114923SGreg Kurz if (ppc_set_compat(core->threads[i], 3821*a3114923SGreg Kurz POWERPC_CPU(first_cpu)->compat_pvr, 3822*a3114923SGreg Kurz errp) < 0) { 3823b1e81567SGreg Kurz return; 3824b1e81567SGreg Kurz } 3825b1e81567SGreg Kurz } 3826b1e81567SGreg Kurz } 3827ff9006ddSIgor Mammedov } 3828ff9006ddSIgor Mammedov 3829ff9006ddSIgor Mammedov static void spapr_core_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev, 3830ff9006ddSIgor Mammedov Error **errp) 3831ff9006ddSIgor Mammedov { 3832ff9006ddSIgor Mammedov MachineState *machine = MACHINE(OBJECT(hotplug_dev)); 3833ff9006ddSIgor Mammedov MachineClass *mc = MACHINE_GET_CLASS(hotplug_dev); 3834ff9006ddSIgor Mammedov CPUCore *cc = CPU_CORE(dev); 38352e9c10ebSIgor Mammedov const char *base_core_type = spapr_get_cpu_core_type(machine->cpu_type); 3836ff9006ddSIgor Mammedov const char *type = object_get_typename(OBJECT(dev)); 3837535455fdSIgor Mammedov CPUArchId *core_slot; 3838535455fdSIgor Mammedov int index; 3839fe6b6346SLike Xu unsigned int smp_threads = machine->smp.threads; 3840ff9006ddSIgor Mammedov 3841c5514d0eSIgor Mammedov if (dev->hotplugged && !mc->has_hotpluggable_cpus) { 3842dcfe4805SMarkus Armbruster error_setg(errp, "CPU hotplug not supported for this machine"); 3843dcfe4805SMarkus Armbruster return; 3844ff9006ddSIgor Mammedov } 3845ff9006ddSIgor Mammedov 3846ff9006ddSIgor Mammedov if (strcmp(base_core_type, type)) { 3847dcfe4805SMarkus Armbruster error_setg(errp, "CPU core type should be %s", base_core_type); 3848dcfe4805SMarkus Armbruster return; 3849ff9006ddSIgor Mammedov } 3850ff9006ddSIgor Mammedov 3851ff9006ddSIgor Mammedov if (cc->core_id % smp_threads) { 3852dcfe4805SMarkus Armbruster error_setg(errp, "invalid core id %d", cc->core_id); 3853dcfe4805SMarkus Armbruster return; 3854ff9006ddSIgor Mammedov } 3855ff9006ddSIgor Mammedov 3856459264efSDavid Gibson /* 
3857459264efSDavid Gibson * In general we should have homogeneous threads-per-core, but old 3858459264efSDavid Gibson * (pre hotplug support) machine types allow the last core to have 3859459264efSDavid Gibson * reduced threads as a compatibility hack for when we allowed 3860459264efSDavid Gibson * total vcpus not a multiple of threads-per-core. 3861459264efSDavid Gibson */ 3862459264efSDavid Gibson if (mc->has_hotpluggable_cpus && (cc->nr_threads != smp_threads)) { 3863dcfe4805SMarkus Armbruster error_setg(errp, "invalid nr-threads %d, must be %d", cc->nr_threads, 3864dcfe4805SMarkus Armbruster smp_threads); 3865dcfe4805SMarkus Armbruster return; 38668149e299SDavid Gibson } 38678149e299SDavid Gibson 3868535455fdSIgor Mammedov core_slot = spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index); 3869535455fdSIgor Mammedov if (!core_slot) { 3870dcfe4805SMarkus Armbruster error_setg(errp, "core id %d out of range", cc->core_id); 3871dcfe4805SMarkus Armbruster return; 3872ff9006ddSIgor Mammedov } 3873ff9006ddSIgor Mammedov 3874535455fdSIgor Mammedov if (core_slot->cpu) { 3875dcfe4805SMarkus Armbruster error_setg(errp, "core %d already populated", cc->core_id); 3876dcfe4805SMarkus Armbruster return; 3877ff9006ddSIgor Mammedov } 3878ff9006ddSIgor Mammedov 3879dcfe4805SMarkus Armbruster numa_cpu_pre_plug(core_slot, dev, errp); 3880ff9006ddSIgor Mammedov } 3881ff9006ddSIgor Mammedov 3882ce2918cbSDavid Gibson int spapr_phb_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr, 3883bb2bdd81SGreg Kurz void *fdt, int *fdt_start_offset, Error **errp) 3884bb2bdd81SGreg Kurz { 3885ce2918cbSDavid Gibson SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(drc->dev); 3886bb2bdd81SGreg Kurz int intc_phandle; 3887bb2bdd81SGreg Kurz 3888bb2bdd81SGreg Kurz intc_phandle = spapr_irq_get_phandle(spapr, spapr->fdt_blob, errp); 3889bb2bdd81SGreg Kurz if (intc_phandle <= 0) { 3890bb2bdd81SGreg Kurz return -1; 3891bb2bdd81SGreg Kurz } 3892bb2bdd81SGreg Kurz 38938cbe71ecSDavid Gibson if 
(spapr_dt_phb(spapr, sphb, intc_phandle, fdt, fdt_start_offset)) { 3894bb2bdd81SGreg Kurz error_setg(errp, "unable to create FDT node for PHB %d", sphb->index); 3895bb2bdd81SGreg Kurz return -1; 3896bb2bdd81SGreg Kurz } 3897bb2bdd81SGreg Kurz 3898bb2bdd81SGreg Kurz /* generally SLOF creates these, for hotplug it's up to QEMU */ 3899bb2bdd81SGreg Kurz _FDT(fdt_setprop_string(fdt, *fdt_start_offset, "name", "pci")); 3900bb2bdd81SGreg Kurz 3901bb2bdd81SGreg Kurz return 0; 3902bb2bdd81SGreg Kurz } 3903bb2bdd81SGreg Kurz 3904bb2bdd81SGreg Kurz static void spapr_phb_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev, 3905bb2bdd81SGreg Kurz Error **errp) 3906bb2bdd81SGreg Kurz { 3907ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev)); 3908ce2918cbSDavid Gibson SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(dev); 3909ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); 3910bb2bdd81SGreg Kurz const unsigned windows_supported = spapr_phb_windows_supported(sphb); 3911bb2bdd81SGreg Kurz 3912bb2bdd81SGreg Kurz if (dev->hotplugged && !smc->dr_phb_enabled) { 3913bb2bdd81SGreg Kurz error_setg(errp, "PHB hotplug not supported for this machine"); 3914bb2bdd81SGreg Kurz return; 3915bb2bdd81SGreg Kurz } 3916bb2bdd81SGreg Kurz 3917bb2bdd81SGreg Kurz if (sphb->index == (uint32_t)-1) { 3918bb2bdd81SGreg Kurz error_setg(errp, "\"index\" for PAPR PHB is mandatory"); 3919bb2bdd81SGreg Kurz return; 3920bb2bdd81SGreg Kurz } 3921bb2bdd81SGreg Kurz 3922bb2bdd81SGreg Kurz /* 3923bb2bdd81SGreg Kurz * This will check that sphb->index doesn't exceed the maximum number of 3924bb2bdd81SGreg Kurz * PHBs for the current machine type. 
3925bb2bdd81SGreg Kurz */ 3926bb2bdd81SGreg Kurz smc->phb_placement(spapr, sphb->index, 3927bb2bdd81SGreg Kurz &sphb->buid, &sphb->io_win_addr, 3928bb2bdd81SGreg Kurz &sphb->mem_win_addr, &sphb->mem64_win_addr, 3929ec132efaSAlexey Kardashevskiy windows_supported, sphb->dma_liobn, 3930ec132efaSAlexey Kardashevskiy &sphb->nv2_gpa_win_addr, &sphb->nv2_atsd_win_addr, 3931ec132efaSAlexey Kardashevskiy errp); 3932bb2bdd81SGreg Kurz } 3933bb2bdd81SGreg Kurz 3934bb2bdd81SGreg Kurz static void spapr_phb_plug(HotplugHandler *hotplug_dev, DeviceState *dev, 3935bb2bdd81SGreg Kurz Error **errp) 3936bb2bdd81SGreg Kurz { 3937ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev)); 3938ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); 3939ce2918cbSDavid Gibson SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(dev); 3940ce2918cbSDavid Gibson SpaprDrc *drc; 3941bb2bdd81SGreg Kurz bool hotplugged = spapr_drc_hotplugged(dev); 3942bb2bdd81SGreg Kurz Error *local_err = NULL; 3943bb2bdd81SGreg Kurz 3944bb2bdd81SGreg Kurz if (!smc->dr_phb_enabled) { 3945bb2bdd81SGreg Kurz return; 3946bb2bdd81SGreg Kurz } 3947bb2bdd81SGreg Kurz 3948bb2bdd81SGreg Kurz drc = spapr_drc_by_id(TYPE_SPAPR_DRC_PHB, sphb->index); 3949bb2bdd81SGreg Kurz /* hotplug hooks should check it's enabled before getting this far */ 3950bb2bdd81SGreg Kurz assert(drc); 3951bb2bdd81SGreg Kurz 39528e5c952bSPhilippe Mathieu-Daudé spapr_drc_attach(drc, dev, &local_err); 3953bb2bdd81SGreg Kurz if (local_err) { 3954bb2bdd81SGreg Kurz error_propagate(errp, local_err); 3955bb2bdd81SGreg Kurz return; 3956bb2bdd81SGreg Kurz } 3957bb2bdd81SGreg Kurz 3958bb2bdd81SGreg Kurz if (hotplugged) { 3959bb2bdd81SGreg Kurz spapr_hotplug_req_add_by_index(drc); 3960bb2bdd81SGreg Kurz } else { 3961bb2bdd81SGreg Kurz spapr_drc_reset(drc); 3962bb2bdd81SGreg Kurz } 3963bb2bdd81SGreg Kurz } 3964bb2bdd81SGreg Kurz 3965bb2bdd81SGreg Kurz void spapr_phb_release(DeviceState *dev) 3966bb2bdd81SGreg Kurz { 
3967bb2bdd81SGreg Kurz HotplugHandler *hotplug_ctrl = qdev_get_hotplug_handler(dev); 3968bb2bdd81SGreg Kurz 3969bb2bdd81SGreg Kurz hotplug_handler_unplug(hotplug_ctrl, dev, &error_abort); 397007578b0aSDavid Hildenbrand object_unparent(OBJECT(dev)); 3971bb2bdd81SGreg Kurz } 3972bb2bdd81SGreg Kurz 3973bb2bdd81SGreg Kurz static void spapr_phb_unplug(HotplugHandler *hotplug_dev, DeviceState *dev) 3974bb2bdd81SGreg Kurz { 3975981c3dcdSMarkus Armbruster qdev_unrealize(dev); 3976bb2bdd81SGreg Kurz } 3977bb2bdd81SGreg Kurz 3978bb2bdd81SGreg Kurz static void spapr_phb_unplug_request(HotplugHandler *hotplug_dev, 3979bb2bdd81SGreg Kurz DeviceState *dev, Error **errp) 3980bb2bdd81SGreg Kurz { 3981ce2918cbSDavid Gibson SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(dev); 3982ce2918cbSDavid Gibson SpaprDrc *drc; 3983bb2bdd81SGreg Kurz 3984bb2bdd81SGreg Kurz drc = spapr_drc_by_id(TYPE_SPAPR_DRC_PHB, sphb->index); 3985bb2bdd81SGreg Kurz assert(drc); 3986bb2bdd81SGreg Kurz 3987bb2bdd81SGreg Kurz if (!spapr_drc_unplug_requested(drc)) { 3988bb2bdd81SGreg Kurz spapr_drc_detach(drc); 3989bb2bdd81SGreg Kurz spapr_hotplug_req_remove_by_index(drc); 3990bb2bdd81SGreg Kurz } 3991bb2bdd81SGreg Kurz } 3992bb2bdd81SGreg Kurz 39930fb6bd07SMichael Roth static void spapr_tpm_proxy_plug(HotplugHandler *hotplug_dev, DeviceState *dev, 39940fb6bd07SMichael Roth Error **errp) 39950fb6bd07SMichael Roth { 39960fb6bd07SMichael Roth SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev)); 39970fb6bd07SMichael Roth SpaprTpmProxy *tpm_proxy = SPAPR_TPM_PROXY(dev); 39980fb6bd07SMichael Roth 39990fb6bd07SMichael Roth if (spapr->tpm_proxy != NULL) { 40000fb6bd07SMichael Roth error_setg(errp, "Only one TPM proxy can be specified for this machine"); 40010fb6bd07SMichael Roth return; 40020fb6bd07SMichael Roth } 40030fb6bd07SMichael Roth 40040fb6bd07SMichael Roth spapr->tpm_proxy = tpm_proxy; 40050fb6bd07SMichael Roth } 40060fb6bd07SMichael Roth 40070fb6bd07SMichael Roth static void 
spapr_tpm_proxy_unplug(HotplugHandler *hotplug_dev, DeviceState *dev) 40080fb6bd07SMichael Roth { 40090fb6bd07SMichael Roth SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev)); 40100fb6bd07SMichael Roth 4011981c3dcdSMarkus Armbruster qdev_unrealize(dev); 40120fb6bd07SMichael Roth object_unparent(OBJECT(dev)); 40130fb6bd07SMichael Roth spapr->tpm_proxy = NULL; 40140fb6bd07SMichael Roth } 40150fb6bd07SMichael Roth 4016c20d332aSBharata B Rao static void spapr_machine_device_plug(HotplugHandler *hotplug_dev, 4017c20d332aSBharata B Rao DeviceState *dev, Error **errp) 4018c20d332aSBharata B Rao { 4019c20d332aSBharata B Rao if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { 402081985f3bSDavid Hildenbrand spapr_memory_plug(hotplug_dev, dev, errp); 4021af81cf32SBharata B Rao } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) { 4022af81cf32SBharata B Rao spapr_core_plug(hotplug_dev, dev, errp); 4023bb2bdd81SGreg Kurz } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) { 4024bb2bdd81SGreg Kurz spapr_phb_plug(hotplug_dev, dev, errp); 40250fb6bd07SMichael Roth } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) { 40260fb6bd07SMichael Roth spapr_tpm_proxy_plug(hotplug_dev, dev, errp); 4027c20d332aSBharata B Rao } 4028c20d332aSBharata B Rao } 4029c20d332aSBharata B Rao 403088432f44SDavid Hildenbrand static void spapr_machine_device_unplug(HotplugHandler *hotplug_dev, 403188432f44SDavid Hildenbrand DeviceState *dev, Error **errp) 403288432f44SDavid Hildenbrand { 40333ec71474SDavid Hildenbrand if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { 40343ec71474SDavid Hildenbrand spapr_memory_unplug(hotplug_dev, dev); 4035a4261be1SDavid Hildenbrand } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) { 4036a4261be1SDavid Hildenbrand spapr_core_unplug(hotplug_dev, dev); 4037bb2bdd81SGreg Kurz } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) { 4038bb2bdd81SGreg Kurz 
spapr_phb_unplug(hotplug_dev, dev); 40390fb6bd07SMichael Roth } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) { 40400fb6bd07SMichael Roth spapr_tpm_proxy_unplug(hotplug_dev, dev); 40413ec71474SDavid Hildenbrand } 404288432f44SDavid Hildenbrand } 404388432f44SDavid Hildenbrand 4044cf632463SBharata B Rao static void spapr_machine_device_unplug_request(HotplugHandler *hotplug_dev, 4045cf632463SBharata B Rao DeviceState *dev, Error **errp) 4046cf632463SBharata B Rao { 4047ce2918cbSDavid Gibson SpaprMachineState *sms = SPAPR_MACHINE(OBJECT(hotplug_dev)); 4048c86c1affSDaniel Henrique Barboza MachineClass *mc = MACHINE_GET_CLASS(sms); 4049ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 4050cf632463SBharata B Rao 4051cf632463SBharata B Rao if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { 4052cf632463SBharata B Rao if (spapr_ovec_test(sms->ov5_cas, OV5_HP_EVT)) { 4053cf632463SBharata B Rao spapr_memory_unplug_request(hotplug_dev, dev, errp); 4054cf632463SBharata B Rao } else { 4055cf632463SBharata B Rao /* NOTE: this means there is a window after guest reset, prior to 4056cf632463SBharata B Rao * CAS negotiation, where unplug requests will fail due to the 4057cf632463SBharata B Rao * capability not being detected yet. 
This is a bit different than 4058cf632463SBharata B Rao * the case with PCI unplug, where the events will be queued and 4059cf632463SBharata B Rao * eventually handled by the guest after boot 4060cf632463SBharata B Rao */ 4061cf632463SBharata B Rao error_setg(errp, "Memory hot unplug not supported for this guest"); 4062cf632463SBharata B Rao } 40636f4b5c3eSBharata B Rao } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) { 4064c5514d0eSIgor Mammedov if (!mc->has_hotpluggable_cpus) { 40656f4b5c3eSBharata B Rao error_setg(errp, "CPU hot unplug not supported on this machine"); 40666f4b5c3eSBharata B Rao return; 40676f4b5c3eSBharata B Rao } 4068115debf2SIgor Mammedov spapr_core_unplug_request(hotplug_dev, dev, errp); 4069bb2bdd81SGreg Kurz } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) { 4070bb2bdd81SGreg Kurz if (!smc->dr_phb_enabled) { 4071bb2bdd81SGreg Kurz error_setg(errp, "PHB hot unplug not supported on this machine"); 4072bb2bdd81SGreg Kurz return; 4073bb2bdd81SGreg Kurz } 4074bb2bdd81SGreg Kurz spapr_phb_unplug_request(hotplug_dev, dev, errp); 40750fb6bd07SMichael Roth } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) { 40760fb6bd07SMichael Roth spapr_tpm_proxy_unplug(hotplug_dev, dev); 4077c20d332aSBharata B Rao } 4078c20d332aSBharata B Rao } 4079c20d332aSBharata B Rao 408094a94e4cSBharata B Rao static void spapr_machine_device_pre_plug(HotplugHandler *hotplug_dev, 408194a94e4cSBharata B Rao DeviceState *dev, Error **errp) 408294a94e4cSBharata B Rao { 4083c871bc70SLaurent Vivier if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { 4084c871bc70SLaurent Vivier spapr_memory_pre_plug(hotplug_dev, dev, errp); 4085c871bc70SLaurent Vivier } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) { 408694a94e4cSBharata B Rao spapr_core_pre_plug(hotplug_dev, dev, errp); 4087bb2bdd81SGreg Kurz } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) { 4088bb2bdd81SGreg Kurz 
spapr_phb_pre_plug(hotplug_dev, dev, errp); 408994a94e4cSBharata B Rao } 409094a94e4cSBharata B Rao } 409194a94e4cSBharata B Rao 40927ebaf795SBharata B Rao static HotplugHandler *spapr_get_hotplug_handler(MachineState *machine, 4093c20d332aSBharata B Rao DeviceState *dev) 4094c20d332aSBharata B Rao { 409594a94e4cSBharata B Rao if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM) || 4096bb2bdd81SGreg Kurz object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE) || 40970fb6bd07SMichael Roth object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE) || 40980fb6bd07SMichael Roth object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) { 4099c20d332aSBharata B Rao return HOTPLUG_HANDLER(machine); 4100c20d332aSBharata B Rao } 4101cb600087SDavid Gibson if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) { 4102cb600087SDavid Gibson PCIDevice *pcidev = PCI_DEVICE(dev); 4103cb600087SDavid Gibson PCIBus *root = pci_device_root_bus(pcidev); 4104cb600087SDavid Gibson SpaprPhbState *phb = 4105cb600087SDavid Gibson (SpaprPhbState *)object_dynamic_cast(OBJECT(BUS(root)->parent), 4106cb600087SDavid Gibson TYPE_SPAPR_PCI_HOST_BRIDGE); 4107cb600087SDavid Gibson 4108cb600087SDavid Gibson if (phb) { 4109cb600087SDavid Gibson return HOTPLUG_HANDLER(phb); 4110cb600087SDavid Gibson } 4111cb600087SDavid Gibson } 4112c20d332aSBharata B Rao return NULL; 4113c20d332aSBharata B Rao } 4114c20d332aSBharata B Rao 4115ea089eebSIgor Mammedov static CpuInstanceProperties 4116ea089eebSIgor Mammedov spapr_cpu_index_to_props(MachineState *machine, unsigned cpu_index) 411720bb648dSDavid Gibson { 4118ea089eebSIgor Mammedov CPUArchId *core_slot; 4119ea089eebSIgor Mammedov MachineClass *mc = MACHINE_GET_CLASS(machine); 4120ea089eebSIgor Mammedov 4121ea089eebSIgor Mammedov /* make sure possible_cpu are intialized */ 4122ea089eebSIgor Mammedov mc->possible_cpu_arch_ids(machine); 4123ea089eebSIgor Mammedov /* get CPU core slot containing thread that matches cpu_index */ 4124ea089eebSIgor Mammedov core_slot = 
spapr_find_cpu_slot(machine, cpu_index, NULL); 4125ea089eebSIgor Mammedov assert(core_slot); 4126ea089eebSIgor Mammedov return core_slot->props; 412720bb648dSDavid Gibson } 412820bb648dSDavid Gibson 412979e07936SIgor Mammedov static int64_t spapr_get_default_cpu_node_id(const MachineState *ms, int idx) 413079e07936SIgor Mammedov { 4131aa570207STao Xu return idx / ms->smp.cores % ms->numa_state->num_nodes; 413279e07936SIgor Mammedov } 413379e07936SIgor Mammedov 4134535455fdSIgor Mammedov static const CPUArchIdList *spapr_possible_cpu_arch_ids(MachineState *machine) 4135535455fdSIgor Mammedov { 4136535455fdSIgor Mammedov int i; 4137fe6b6346SLike Xu unsigned int smp_threads = machine->smp.threads; 4138fe6b6346SLike Xu unsigned int smp_cpus = machine->smp.cpus; 4139d342eb76SIgor Mammedov const char *core_type; 4140fe6b6346SLike Xu int spapr_max_cores = machine->smp.max_cpus / smp_threads; 4141535455fdSIgor Mammedov MachineClass *mc = MACHINE_GET_CLASS(machine); 4142535455fdSIgor Mammedov 4143c5514d0eSIgor Mammedov if (!mc->has_hotpluggable_cpus) { 4144535455fdSIgor Mammedov spapr_max_cores = QEMU_ALIGN_UP(smp_cpus, smp_threads) / smp_threads; 4145535455fdSIgor Mammedov } 4146535455fdSIgor Mammedov if (machine->possible_cpus) { 4147535455fdSIgor Mammedov assert(machine->possible_cpus->len == spapr_max_cores); 4148535455fdSIgor Mammedov return machine->possible_cpus; 4149535455fdSIgor Mammedov } 4150535455fdSIgor Mammedov 4151d342eb76SIgor Mammedov core_type = spapr_get_cpu_core_type(machine->cpu_type); 4152d342eb76SIgor Mammedov if (!core_type) { 4153d342eb76SIgor Mammedov error_report("Unable to find sPAPR CPU Core definition"); 4154d342eb76SIgor Mammedov exit(1); 4155d342eb76SIgor Mammedov } 4156d342eb76SIgor Mammedov 4157535455fdSIgor Mammedov machine->possible_cpus = g_malloc0(sizeof(CPUArchIdList) + 4158535455fdSIgor Mammedov sizeof(CPUArchId) * spapr_max_cores); 4159535455fdSIgor Mammedov machine->possible_cpus->len = spapr_max_cores; 4160535455fdSIgor Mammedov 
for (i = 0; i < machine->possible_cpus->len; i++) { 4161535455fdSIgor Mammedov int core_id = i * smp_threads; 4162535455fdSIgor Mammedov 4163d342eb76SIgor Mammedov machine->possible_cpus->cpus[i].type = core_type; 4164f2d672c2SIgor Mammedov machine->possible_cpus->cpus[i].vcpus_count = smp_threads; 4165535455fdSIgor Mammedov machine->possible_cpus->cpus[i].arch_id = core_id; 4166535455fdSIgor Mammedov machine->possible_cpus->cpus[i].props.has_core_id = true; 4167535455fdSIgor Mammedov machine->possible_cpus->cpus[i].props.core_id = core_id; 4168535455fdSIgor Mammedov } 4169535455fdSIgor Mammedov return machine->possible_cpus; 4170535455fdSIgor Mammedov } 4171535455fdSIgor Mammedov 4172ce2918cbSDavid Gibson static void spapr_phb_placement(SpaprMachineState *spapr, uint32_t index, 4173daa23699SDavid Gibson uint64_t *buid, hwaddr *pio, 4174daa23699SDavid Gibson hwaddr *mmio32, hwaddr *mmio64, 4175ec132efaSAlexey Kardashevskiy unsigned n_dma, uint32_t *liobns, 4176ec132efaSAlexey Kardashevskiy hwaddr *nv2gpa, hwaddr *nv2atsd, Error **errp) 41776737d9adSDavid Gibson { 4178357d1e3bSDavid Gibson /* 4179357d1e3bSDavid Gibson * New-style PHB window placement. 4180357d1e3bSDavid Gibson * 4181357d1e3bSDavid Gibson * Goals: Gives large (1TiB), naturally aligned 64-bit MMIO window 4182357d1e3bSDavid Gibson * for each PHB, in addition to 2GiB 32-bit MMIO and 64kiB PIO 4183357d1e3bSDavid Gibson * windows. 4184357d1e3bSDavid Gibson * 4185357d1e3bSDavid Gibson * Some guest kernels can't work with MMIO windows above 1<<46 4186357d1e3bSDavid Gibson * (64TiB), so we place up to 31 PHBs in the area 32TiB..64TiB 4187357d1e3bSDavid Gibson * 4188357d1e3bSDavid Gibson * 32TiB..(33TiB+1984kiB) contains the 64kiB PIO windows for each 4189357d1e3bSDavid Gibson * PHB stacked together. (32TiB+2GiB)..(32TiB+64GiB) contains the 4190357d1e3bSDavid Gibson * 2GiB 32-bit MMIO windows for each PHB. Then 33..64TiB has the 4191357d1e3bSDavid Gibson * 1TiB 64-bit MMIO windows for each PHB. 
4192357d1e3bSDavid Gibson */ 41936737d9adSDavid Gibson const uint64_t base_buid = 0x800000020000000ULL; 41946737d9adSDavid Gibson int i; 41956737d9adSDavid Gibson 4196357d1e3bSDavid Gibson /* Sanity check natural alignments */ 4197357d1e3bSDavid Gibson QEMU_BUILD_BUG_ON((SPAPR_PCI_BASE % SPAPR_PCI_MEM64_WIN_SIZE) != 0); 4198357d1e3bSDavid Gibson QEMU_BUILD_BUG_ON((SPAPR_PCI_LIMIT % SPAPR_PCI_MEM64_WIN_SIZE) != 0); 4199357d1e3bSDavid Gibson QEMU_BUILD_BUG_ON((SPAPR_PCI_MEM64_WIN_SIZE % SPAPR_PCI_MEM32_WIN_SIZE) != 0); 4200357d1e3bSDavid Gibson QEMU_BUILD_BUG_ON((SPAPR_PCI_MEM32_WIN_SIZE % SPAPR_PCI_IO_WIN_SIZE) != 0); 4201357d1e3bSDavid Gibson /* Sanity check bounds */ 420225e6a118SMichael S. Tsirkin QEMU_BUILD_BUG_ON((SPAPR_MAX_PHBS * SPAPR_PCI_IO_WIN_SIZE) > 420325e6a118SMichael S. Tsirkin SPAPR_PCI_MEM32_WIN_SIZE); 420425e6a118SMichael S. Tsirkin QEMU_BUILD_BUG_ON((SPAPR_MAX_PHBS * SPAPR_PCI_MEM32_WIN_SIZE) > 420525e6a118SMichael S. Tsirkin SPAPR_PCI_MEM64_WIN_SIZE); 42062efff1c0SDavid Gibson 420725e6a118SMichael S. Tsirkin if (index >= SPAPR_MAX_PHBS) { 420825e6a118SMichael S. Tsirkin error_setg(errp, "\"index\" for PAPR PHB is too large (max %llu)", 420925e6a118SMichael S. 
Tsirkin SPAPR_MAX_PHBS - 1); 42106737d9adSDavid Gibson return; 42116737d9adSDavid Gibson } 42126737d9adSDavid Gibson 42136737d9adSDavid Gibson *buid = base_buid + index; 42146737d9adSDavid Gibson for (i = 0; i < n_dma; ++i) { 42156737d9adSDavid Gibson liobns[i] = SPAPR_PCI_LIOBN(index, i); 42166737d9adSDavid Gibson } 42176737d9adSDavid Gibson 4218357d1e3bSDavid Gibson *pio = SPAPR_PCI_BASE + index * SPAPR_PCI_IO_WIN_SIZE; 4219357d1e3bSDavid Gibson *mmio32 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM32_WIN_SIZE; 4220357d1e3bSDavid Gibson *mmio64 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM64_WIN_SIZE; 4221ec132efaSAlexey Kardashevskiy 4222ec132efaSAlexey Kardashevskiy *nv2gpa = SPAPR_PCI_NV2RAM64_WIN_BASE + index * SPAPR_PCI_NV2RAM64_WIN_SIZE; 4223ec132efaSAlexey Kardashevskiy *nv2atsd = SPAPR_PCI_NV2ATSD_WIN_BASE + index * SPAPR_PCI_NV2ATSD_WIN_SIZE; 42246737d9adSDavid Gibson } 42256737d9adSDavid Gibson 42267844e12bSCédric Le Goater static ICSState *spapr_ics_get(XICSFabric *dev, int irq) 42277844e12bSCédric Le Goater { 4228ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(dev); 42297844e12bSCédric Le Goater 42307844e12bSCédric Le Goater return ics_valid_irq(spapr->ics, irq) ? spapr->ics : NULL; 42317844e12bSCédric Le Goater } 42327844e12bSCédric Le Goater 42337844e12bSCédric Le Goater static void spapr_ics_resend(XICSFabric *dev) 42347844e12bSCédric Le Goater { 4235ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(dev); 42367844e12bSCédric Le Goater 42377844e12bSCédric Le Goater ics_resend(spapr->ics); 42387844e12bSCédric Le Goater } 42397844e12bSCédric Le Goater 424081210c20SSam Bobroff static ICPState *spapr_icp_get(XICSFabric *xi, int vcpu_id) 4241b2fc59aaSCédric Le Goater { 42422e886fb3SSam Bobroff PowerPCCPU *cpu = spapr_find_cpu(vcpu_id); 4243b2fc59aaSCédric Le Goater 4244a28b9a5aSCédric Le Goater return cpu ? 
spapr_cpu_state(cpu)->icp : NULL; 4245b2fc59aaSCédric Le Goater } 4246b2fc59aaSCédric Le Goater 42476449da45SCédric Le Goater static void spapr_pic_print_info(InterruptStatsProvider *obj, 42486449da45SCédric Le Goater Monitor *mon) 42496449da45SCédric Le Goater { 4250ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(obj); 42516449da45SCédric Le Goater 4252328d8eb2SDavid Gibson spapr_irq_print_info(spapr, mon); 4253f041d6afSGreg Kurz monitor_printf(mon, "irqchip: %s\n", 4254f041d6afSGreg Kurz kvm_irqchip_in_kernel() ? "in-kernel" : "emulated"); 42556449da45SCédric Le Goater } 42566449da45SCédric Le Goater 4257baa45b17SCédric Le Goater /* 4258baa45b17SCédric Le Goater * This is a XIVE only operation 4259baa45b17SCédric Le Goater */ 4260932de7aeSCédric Le Goater static int spapr_match_nvt(XiveFabric *xfb, uint8_t format, 4261932de7aeSCédric Le Goater uint8_t nvt_blk, uint32_t nvt_idx, 4262932de7aeSCédric Le Goater bool cam_ignore, uint8_t priority, 4263932de7aeSCédric Le Goater uint32_t logic_serv, XiveTCTXMatch *match) 4264932de7aeSCédric Le Goater { 4265932de7aeSCédric Le Goater SpaprMachineState *spapr = SPAPR_MACHINE(xfb); 4266baa45b17SCédric Le Goater XivePresenter *xptr = XIVE_PRESENTER(spapr->active_intc); 4267932de7aeSCédric Le Goater XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr); 4268932de7aeSCédric Le Goater int count; 4269932de7aeSCédric Le Goater 4270932de7aeSCédric Le Goater count = xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, cam_ignore, 4271932de7aeSCédric Le Goater priority, logic_serv, match); 4272932de7aeSCédric Le Goater if (count < 0) { 4273932de7aeSCédric Le Goater return count; 4274932de7aeSCédric Le Goater } 4275932de7aeSCédric Le Goater 4276932de7aeSCédric Le Goater /* 4277932de7aeSCédric Le Goater * When we implement the save and restore of the thread interrupt 4278932de7aeSCédric Le Goater * contexts in the enter/exit CPU handlers of the machine and the 4279932de7aeSCédric Le Goater * escalations in QEMU, we should 
be able to handle non dispatched 4280932de7aeSCédric Le Goater * vCPUs. 4281932de7aeSCédric Le Goater * 4282932de7aeSCédric Le Goater * Until this is done, the sPAPR machine should find at least one 4283932de7aeSCédric Le Goater * matching context always. 4284932de7aeSCédric Le Goater */ 4285932de7aeSCédric Le Goater if (count == 0) { 4286932de7aeSCédric Le Goater qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVT %x/%x is not dispatched\n", 4287932de7aeSCédric Le Goater nvt_blk, nvt_idx); 4288932de7aeSCédric Le Goater } 4289932de7aeSCédric Le Goater 4290932de7aeSCédric Le Goater return count; 4291932de7aeSCédric Le Goater } 4292932de7aeSCédric Le Goater 429314bb4486SGreg Kurz int spapr_get_vcpu_id(PowerPCCPU *cpu) 42942e886fb3SSam Bobroff { 4295b1a568c1SGreg Kurz return cpu->vcpu_id; 42962e886fb3SSam Bobroff } 42972e886fb3SSam Bobroff 4298648edb64SGreg Kurz void spapr_set_vcpu_id(PowerPCCPU *cpu, int cpu_index, Error **errp) 4299648edb64SGreg Kurz { 4300ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine()); 4301fe6b6346SLike Xu MachineState *ms = MACHINE(spapr); 4302648edb64SGreg Kurz int vcpu_id; 4303648edb64SGreg Kurz 43045d0fb150SGreg Kurz vcpu_id = spapr_vcpu_id(spapr, cpu_index); 4305648edb64SGreg Kurz 4306648edb64SGreg Kurz if (kvm_enabled() && !kvm_vcpu_id_is_valid(vcpu_id)) { 4307648edb64SGreg Kurz error_setg(errp, "Can't create CPU with id %d in KVM", vcpu_id); 4308648edb64SGreg Kurz error_append_hint(errp, "Adjust the number of cpus to %d " 4309648edb64SGreg Kurz "or try to raise the number of threads per core\n", 4310fe6b6346SLike Xu vcpu_id * ms->smp.threads / spapr->vsmt); 4311648edb64SGreg Kurz return; 4312648edb64SGreg Kurz } 4313648edb64SGreg Kurz 4314648edb64SGreg Kurz cpu->vcpu_id = vcpu_id; 4315648edb64SGreg Kurz } 4316648edb64SGreg Kurz 43172e886fb3SSam Bobroff PowerPCCPU *spapr_find_cpu(int vcpu_id) 43182e886fb3SSam Bobroff { 43192e886fb3SSam Bobroff CPUState *cs; 43202e886fb3SSam Bobroff 43212e886fb3SSam Bobroff 
CPU_FOREACH(cs) { 43222e886fb3SSam Bobroff PowerPCCPU *cpu = POWERPC_CPU(cs); 43232e886fb3SSam Bobroff 432414bb4486SGreg Kurz if (spapr_get_vcpu_id(cpu) == vcpu_id) { 43252e886fb3SSam Bobroff return cpu; 43262e886fb3SSam Bobroff } 43272e886fb3SSam Bobroff } 43282e886fb3SSam Bobroff 43292e886fb3SSam Bobroff return NULL; 43302e886fb3SSam Bobroff } 43312e886fb3SSam Bobroff 433203ef074cSNicholas Piggin static void spapr_cpu_exec_enter(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu) 433303ef074cSNicholas Piggin { 433403ef074cSNicholas Piggin SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu); 433503ef074cSNicholas Piggin 433603ef074cSNicholas Piggin /* These are only called by TCG, KVM maintains dispatch state */ 433703ef074cSNicholas Piggin 43383a6e6224SNicholas Piggin spapr_cpu->prod = false; 433903ef074cSNicholas Piggin if (spapr_cpu->vpa_addr) { 434003ef074cSNicholas Piggin CPUState *cs = CPU(cpu); 434103ef074cSNicholas Piggin uint32_t dispatch; 434203ef074cSNicholas Piggin 434303ef074cSNicholas Piggin dispatch = ldl_be_phys(cs->as, 434403ef074cSNicholas Piggin spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER); 434503ef074cSNicholas Piggin dispatch++; 434603ef074cSNicholas Piggin if ((dispatch & 1) != 0) { 434703ef074cSNicholas Piggin qemu_log_mask(LOG_GUEST_ERROR, 434803ef074cSNicholas Piggin "VPA: incorrect dispatch counter value for " 434903ef074cSNicholas Piggin "dispatched partition %u, correcting.\n", dispatch); 435003ef074cSNicholas Piggin dispatch++; 435103ef074cSNicholas Piggin } 435203ef074cSNicholas Piggin stl_be_phys(cs->as, 435303ef074cSNicholas Piggin spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER, dispatch); 435403ef074cSNicholas Piggin } 435503ef074cSNicholas Piggin } 435603ef074cSNicholas Piggin 435703ef074cSNicholas Piggin static void spapr_cpu_exec_exit(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu) 435803ef074cSNicholas Piggin { 435903ef074cSNicholas Piggin SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu); 436003ef074cSNicholas Piggin 436103ef074cSNicholas Piggin 
if (spapr_cpu->vpa_addr) { 436203ef074cSNicholas Piggin CPUState *cs = CPU(cpu); 436303ef074cSNicholas Piggin uint32_t dispatch; 436403ef074cSNicholas Piggin 436503ef074cSNicholas Piggin dispatch = ldl_be_phys(cs->as, 436603ef074cSNicholas Piggin spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER); 436703ef074cSNicholas Piggin dispatch++; 436803ef074cSNicholas Piggin if ((dispatch & 1) != 1) { 436903ef074cSNicholas Piggin qemu_log_mask(LOG_GUEST_ERROR, 437003ef074cSNicholas Piggin "VPA: incorrect dispatch counter value for " 437103ef074cSNicholas Piggin "preempted partition %u, correcting.\n", dispatch); 437203ef074cSNicholas Piggin dispatch++; 437303ef074cSNicholas Piggin } 437403ef074cSNicholas Piggin stl_be_phys(cs->as, 437503ef074cSNicholas Piggin spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER, dispatch); 437603ef074cSNicholas Piggin } 437703ef074cSNicholas Piggin } 437803ef074cSNicholas Piggin 437929ee3247SAlexey Kardashevskiy static void spapr_machine_class_init(ObjectClass *oc, void *data) 438053018216SPaolo Bonzini { 438129ee3247SAlexey Kardashevskiy MachineClass *mc = MACHINE_CLASS(oc); 4382ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(oc); 438371461b0fSAlexey Kardashevskiy FWPathProviderClass *fwc = FW_PATH_PROVIDER_CLASS(oc); 438434316482SAlexey Kardashevskiy NMIClass *nc = NMI_CLASS(oc); 4385c20d332aSBharata B Rao HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc); 43861d1be34dSDavid Gibson PPCVirtualHypervisorClass *vhc = PPC_VIRTUAL_HYPERVISOR_CLASS(oc); 43877844e12bSCédric Le Goater XICSFabricClass *xic = XICS_FABRIC_CLASS(oc); 43886449da45SCédric Le Goater InterruptStatsProviderClass *ispc = INTERRUPT_STATS_PROVIDER_CLASS(oc); 4389932de7aeSCédric Le Goater XiveFabricClass *xfc = XIVE_FABRIC_CLASS(oc); 439029ee3247SAlexey Kardashevskiy 43910eb9054cSDavid Gibson mc->desc = "pSeries Logical Partition (PAPR compliant)"; 4392907aac2fSMark Cave-Ayland mc->ignore_boot_device_suffixes = true; 4393fc9f38c3SDavid Gibson 4394fc9f38c3SDavid Gibson /* 
4395fc9f38c3SDavid Gibson * We set up the default / latest behaviour here. The class_init 4396fc9f38c3SDavid Gibson * functions for the specific versioned machine types can override 4397fc9f38c3SDavid Gibson * these details for backwards compatibility 4398fc9f38c3SDavid Gibson */ 4399bcb5ce08SDavid Gibson mc->init = spapr_machine_init; 4400bcb5ce08SDavid Gibson mc->reset = spapr_machine_reset; 4401958db90cSMarcel Apfelbaum mc->block_default_type = IF_SCSI; 44026244bb7eSGreg Kurz mc->max_cpus = 1024; 4403958db90cSMarcel Apfelbaum mc->no_parallel = 1; 44045b2128d2SAlexander Graf mc->default_boot_order = ""; 4405d23b6caaSPhilippe Mathieu-Daudé mc->default_ram_size = 512 * MiB; 4406ab74e543SIgor Mammedov mc->default_ram_id = "ppc_spapr.ram"; 440729f9cef3SSebastian Bauer mc->default_display = "std"; 4408958db90cSMarcel Apfelbaum mc->kvm_type = spapr_kvm_type; 44097da79a16SEduardo Habkost machine_class_allow_dynamic_sysbus_dev(mc, TYPE_SPAPR_PCI_HOST_BRIDGE); 4410e4024630SLaurent Vivier mc->pci_allow_0_address = true; 4411debbdc00SIgor Mammedov assert(!mc->get_hotplug_handler); 44127ebaf795SBharata B Rao mc->get_hotplug_handler = spapr_get_hotplug_handler; 441394a94e4cSBharata B Rao hc->pre_plug = spapr_machine_device_pre_plug; 4414c20d332aSBharata B Rao hc->plug = spapr_machine_device_plug; 4415ea089eebSIgor Mammedov mc->cpu_index_to_instance_props = spapr_cpu_index_to_props; 441679e07936SIgor Mammedov mc->get_default_cpu_node_id = spapr_get_default_cpu_node_id; 4417535455fdSIgor Mammedov mc->possible_cpu_arch_ids = spapr_possible_cpu_arch_ids; 4418cf632463SBharata B Rao hc->unplug_request = spapr_machine_device_unplug_request; 441988432f44SDavid Hildenbrand hc->unplug = spapr_machine_device_unplug; 442000b4fbe2SMarcel Apfelbaum 4421fc9f38c3SDavid Gibson smc->dr_lmb_enabled = true; 4422fea35ca4SAlexey Kardashevskiy smc->update_dt_enabled = true; 442334a6b015SCédric Le Goater mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power9_v2.0"); 4424c5514d0eSIgor Mammedov 
mc->has_hotpluggable_cpus = true; 4425ee3a71e3SShivaprasad G Bhat mc->nvdimm_supported = true; 442652b81ab5SDavid Gibson smc->resize_hpt_default = SPAPR_RESIZE_HPT_ENABLED; 442771461b0fSAlexey Kardashevskiy fwc->get_dev_path = spapr_get_fw_dev_path; 442834316482SAlexey Kardashevskiy nc->nmi_monitor_handler = spapr_nmi; 44296737d9adSDavid Gibson smc->phb_placement = spapr_phb_placement; 44301d1be34dSDavid Gibson vhc->hypercall = emulate_spapr_hypercall; 4431e57ca75cSDavid Gibson vhc->hpt_mask = spapr_hpt_mask; 4432e57ca75cSDavid Gibson vhc->map_hptes = spapr_map_hptes; 4433e57ca75cSDavid Gibson vhc->unmap_hptes = spapr_unmap_hptes; 4434a2dd4e83SBenjamin Herrenschmidt vhc->hpte_set_c = spapr_hpte_set_c; 4435a2dd4e83SBenjamin Herrenschmidt vhc->hpte_set_r = spapr_hpte_set_r; 443679825f4dSBenjamin Herrenschmidt vhc->get_pate = spapr_get_pate; 44371ec26c75SGreg Kurz vhc->encode_hpt_for_kvm_pr = spapr_encode_hpt_for_kvm_pr; 443803ef074cSNicholas Piggin vhc->cpu_exec_enter = spapr_cpu_exec_enter; 443903ef074cSNicholas Piggin vhc->cpu_exec_exit = spapr_cpu_exec_exit; 44407844e12bSCédric Le Goater xic->ics_get = spapr_ics_get; 44417844e12bSCédric Le Goater xic->ics_resend = spapr_ics_resend; 4442b2fc59aaSCédric Le Goater xic->icp_get = spapr_icp_get; 44436449da45SCédric Le Goater ispc->print_info = spapr_pic_print_info; 444455641213SLaurent Vivier /* Force NUMA node memory size to be a multiple of 444555641213SLaurent Vivier * SPAPR_MEMORY_BLOCK_SIZE (256M) since that's the granularity 444655641213SLaurent Vivier * in which LMBs are represented and hot-added 444755641213SLaurent Vivier */ 444855641213SLaurent Vivier mc->numa_mem_align_shift = 28; 44490533ef5fSTao Xu mc->auto_enable_numa = true; 445033face6bSDavid Gibson 44514e5fe368SSuraj Jitindar Singh smc->default_caps.caps[SPAPR_CAP_HTM] = SPAPR_CAP_OFF; 44524e5fe368SSuraj Jitindar Singh smc->default_caps.caps[SPAPR_CAP_VSX] = SPAPR_CAP_ON; 44534e5fe368SSuraj Jitindar Singh smc->default_caps.caps[SPAPR_CAP_DFP] = 
SPAPR_CAP_ON; 44542782ad4cSSuraj Jitindar Singh smc->default_caps.caps[SPAPR_CAP_CFPC] = SPAPR_CAP_WORKAROUND; 44552782ad4cSSuraj Jitindar Singh smc->default_caps.caps[SPAPR_CAP_SBBC] = SPAPR_CAP_WORKAROUND; 44562782ad4cSSuraj Jitindar Singh smc->default_caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_WORKAROUND; 44572309832aSDavid Gibson smc->default_caps.caps[SPAPR_CAP_HPT_MAXPAGESIZE] = 16; /* 64kiB */ 4458b9a477b7SSuraj Jitindar Singh smc->default_caps.caps[SPAPR_CAP_NESTED_KVM_HV] = SPAPR_CAP_OFF; 4459edaa7995SSuraj Jitindar Singh smc->default_caps.caps[SPAPR_CAP_LARGE_DECREMENTER] = SPAPR_CAP_ON; 446037965dfeSDavid Gibson smc->default_caps.caps[SPAPR_CAP_CCF_ASSIST] = SPAPR_CAP_ON; 44618af7e1feSNicholas Piggin smc->default_caps.caps[SPAPR_CAP_FWNMI] = SPAPR_CAP_ON; 446240c2281cSMarkus Armbruster spapr_caps_add_properties(smc); 4463bd94bc06SCédric Le Goater smc->irq = &spapr_irq_dual; 4464dae5e39aSMichael Roth smc->dr_phb_enabled = true; 44656c3829a2SAlexey Kardashevskiy smc->linux_pci_probe = true; 446629cb4187SGreg Kurz smc->smp_threads_vsmt = true; 446754255c1fSDavid Gibson smc->nr_xirqs = SPAPR_NR_XIRQS; 4468932de7aeSCédric Le Goater xfc->match_nvt = spapr_match_nvt; 446953018216SPaolo Bonzini } 447053018216SPaolo Bonzini 447129ee3247SAlexey Kardashevskiy static const TypeInfo spapr_machine_info = { 447229ee3247SAlexey Kardashevskiy .name = TYPE_SPAPR_MACHINE, 447329ee3247SAlexey Kardashevskiy .parent = TYPE_MACHINE, 44744aee7362SDavid Gibson .abstract = true, 4475ce2918cbSDavid Gibson .instance_size = sizeof(SpaprMachineState), 4476bcb5ce08SDavid Gibson .instance_init = spapr_instance_init, 447787bbdd9cSDavid Gibson .instance_finalize = spapr_machine_finalizefn, 4478ce2918cbSDavid Gibson .class_size = sizeof(SpaprMachineClass), 447929ee3247SAlexey Kardashevskiy .class_init = spapr_machine_class_init, 448071461b0fSAlexey Kardashevskiy .interfaces = (InterfaceInfo[]) { 448171461b0fSAlexey Kardashevskiy { TYPE_FW_PATH_PROVIDER }, 448234316482SAlexey Kardashevskiy { 
TYPE_NMI }, 4483c20d332aSBharata B Rao { TYPE_HOTPLUG_HANDLER }, 44841d1be34dSDavid Gibson { TYPE_PPC_VIRTUAL_HYPERVISOR }, 44857844e12bSCédric Le Goater { TYPE_XICS_FABRIC }, 44866449da45SCédric Le Goater { TYPE_INTERRUPT_STATS_PROVIDER }, 4487932de7aeSCédric Le Goater { TYPE_XIVE_FABRIC }, 448871461b0fSAlexey Kardashevskiy { } 448971461b0fSAlexey Kardashevskiy }, 449029ee3247SAlexey Kardashevskiy }; 449129ee3247SAlexey Kardashevskiy 4492a7849268SMichael S. Tsirkin static void spapr_machine_latest_class_options(MachineClass *mc) 4493a7849268SMichael S. Tsirkin { 4494a7849268SMichael S. Tsirkin mc->alias = "pseries"; 4495ea0ac7f6SPhilippe Mathieu-Daudé mc->is_default = true; 4496a7849268SMichael S. Tsirkin } 4497a7849268SMichael S. Tsirkin 4498fccbc785SDavid Gibson #define DEFINE_SPAPR_MACHINE(suffix, verstr, latest) \ 44995013c547SDavid Gibson static void spapr_machine_##suffix##_class_init(ObjectClass *oc, \ 45005013c547SDavid Gibson void *data) \ 45015013c547SDavid Gibson { \ 45025013c547SDavid Gibson MachineClass *mc = MACHINE_CLASS(oc); \ 45035013c547SDavid Gibson spapr_machine_##suffix##_class_options(mc); \ 4504fccbc785SDavid Gibson if (latest) { \ 4505a7849268SMichael S. 
Tsirkin spapr_machine_latest_class_options(mc); \ 4506fccbc785SDavid Gibson } \ 45075013c547SDavid Gibson } \ 45085013c547SDavid Gibson static const TypeInfo spapr_machine_##suffix##_info = { \ 45095013c547SDavid Gibson .name = MACHINE_TYPE_NAME("pseries-" verstr), \ 45105013c547SDavid Gibson .parent = TYPE_SPAPR_MACHINE, \ 45115013c547SDavid Gibson .class_init = spapr_machine_##suffix##_class_init, \ 45125013c547SDavid Gibson }; \ 45135013c547SDavid Gibson static void spapr_machine_register_##suffix(void) \ 45145013c547SDavid Gibson { \ 45155013c547SDavid Gibson type_register(&spapr_machine_##suffix##_info); \ 45165013c547SDavid Gibson } \ 45170e6aac87SEduardo Habkost type_init(spapr_machine_register_##suffix) 45185013c547SDavid Gibson 45191c5f29bbSDavid Gibson /* 45203ff3c5d3SCornelia Huck * pseries-5.2 45213eb74d20SCornelia Huck */ 45223ff3c5d3SCornelia Huck static void spapr_machine_5_2_class_options(MachineClass *mc) 45233eb74d20SCornelia Huck { 45243eb74d20SCornelia Huck /* Defaults for the latest behaviour inherited from the base class */ 45253eb74d20SCornelia Huck } 45263eb74d20SCornelia Huck 45273ff3c5d3SCornelia Huck DEFINE_SPAPR_MACHINE(5_2, "5.2", true); 45283ff3c5d3SCornelia Huck 45293ff3c5d3SCornelia Huck /* 45303ff3c5d3SCornelia Huck * pseries-5.1 45313ff3c5d3SCornelia Huck */ 45323ff3c5d3SCornelia Huck static void spapr_machine_5_1_class_options(MachineClass *mc) 45333ff3c5d3SCornelia Huck { 45343ff3c5d3SCornelia Huck spapr_machine_5_2_class_options(mc); 45353ff3c5d3SCornelia Huck compat_props_add(mc->compat_props, hw_compat_5_1, hw_compat_5_1_len); 45363ff3c5d3SCornelia Huck } 45373ff3c5d3SCornelia Huck 45383ff3c5d3SCornelia Huck DEFINE_SPAPR_MACHINE(5_1, "5.1", false); 4539541aaa1dSCornelia Huck 4540541aaa1dSCornelia Huck /* 4541541aaa1dSCornelia Huck * pseries-5.0 4542541aaa1dSCornelia Huck */ 4543541aaa1dSCornelia Huck static void spapr_machine_5_0_class_options(MachineClass *mc) 4544541aaa1dSCornelia Huck { 4545a6030d7eSReza Arbab 
SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 4546a6030d7eSReza Arbab static GlobalProperty compat[] = { 4547a6030d7eSReza Arbab { TYPE_SPAPR_PCI_HOST_BRIDGE, "pre-5.1-associativity", "on" }, 4548a6030d7eSReza Arbab }; 4549a6030d7eSReza Arbab 4550541aaa1dSCornelia Huck spapr_machine_5_1_class_options(mc); 4551541aaa1dSCornelia Huck compat_props_add(mc->compat_props, hw_compat_5_0, hw_compat_5_0_len); 4552a6030d7eSReza Arbab compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); 455332a354dcSIgor Mammedov mc->numa_mem_supported = true; 4554a6030d7eSReza Arbab smc->pre_5_1_assoc_refpoints = true; 4555541aaa1dSCornelia Huck } 4556541aaa1dSCornelia Huck 4557541aaa1dSCornelia Huck DEFINE_SPAPR_MACHINE(5_0, "5.0", false); 45583eb74d20SCornelia Huck 45593eb74d20SCornelia Huck /* 45609aec2e52SCornelia Huck * pseries-4.2 4561e2676b16SGreg Kurz */ 45629aec2e52SCornelia Huck static void spapr_machine_4_2_class_options(MachineClass *mc) 4563e2676b16SGreg Kurz { 456437965dfeSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 456537965dfeSDavid Gibson 45663eb74d20SCornelia Huck spapr_machine_5_0_class_options(mc); 45675f258577SEvgeny Yakovlev compat_props_add(mc->compat_props, hw_compat_4_2, hw_compat_4_2_len); 456837965dfeSDavid Gibson smc->default_caps.caps[SPAPR_CAP_CCF_ASSIST] = SPAPR_CAP_OFF; 45698af7e1feSNicholas Piggin smc->default_caps.caps[SPAPR_CAP_FWNMI] = SPAPR_CAP_OFF; 45701052ab67SDavid Gibson smc->rma_limit = 16 * GiB; 4571ee3a71e3SShivaprasad G Bhat mc->nvdimm_supported = false; 4572e2676b16SGreg Kurz } 4573e2676b16SGreg Kurz 45743eb74d20SCornelia Huck DEFINE_SPAPR_MACHINE(4_2, "4.2", false); 45759aec2e52SCornelia Huck 45769aec2e52SCornelia Huck /* 45779aec2e52SCornelia Huck * pseries-4.1 45789aec2e52SCornelia Huck */ 45799aec2e52SCornelia Huck static void spapr_machine_4_1_class_options(MachineClass *mc) 45809aec2e52SCornelia Huck { 45816c3829a2SAlexey Kardashevskiy SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 4582d15d4ad6SDavid 
Gibson static GlobalProperty compat[] = { 4583d15d4ad6SDavid Gibson /* Only allow 4kiB and 64kiB IOMMU pagesizes */ 4584d15d4ad6SDavid Gibson { TYPE_SPAPR_PCI_HOST_BRIDGE, "pgsz", "0x11000" }, 4585d15d4ad6SDavid Gibson }; 4586d15d4ad6SDavid Gibson 45879aec2e52SCornelia Huck spapr_machine_4_2_class_options(mc); 45886c3829a2SAlexey Kardashevskiy smc->linux_pci_probe = false; 458929cb4187SGreg Kurz smc->smp_threads_vsmt = false; 45909aec2e52SCornelia Huck compat_props_add(mc->compat_props, hw_compat_4_1, hw_compat_4_1_len); 4591d15d4ad6SDavid Gibson compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); 45929aec2e52SCornelia Huck } 45939aec2e52SCornelia Huck 45949aec2e52SCornelia Huck DEFINE_SPAPR_MACHINE(4_1, "4.1", false); 45959bf2650bSCornelia Huck 45969bf2650bSCornelia Huck /* 45979bf2650bSCornelia Huck * pseries-4.0 45989bf2650bSCornelia Huck */ 4599eb3cba82SDavid Gibson static void phb_placement_4_0(SpaprMachineState *spapr, uint32_t index, 4600ec132efaSAlexey Kardashevskiy uint64_t *buid, hwaddr *pio, 4601ec132efaSAlexey Kardashevskiy hwaddr *mmio32, hwaddr *mmio64, 4602ec132efaSAlexey Kardashevskiy unsigned n_dma, uint32_t *liobns, 4603ec132efaSAlexey Kardashevskiy hwaddr *nv2gpa, hwaddr *nv2atsd, Error **errp) 4604ec132efaSAlexey Kardashevskiy { 4605ec132efaSAlexey Kardashevskiy spapr_phb_placement(spapr, index, buid, pio, mmio32, mmio64, n_dma, liobns, 4606ec132efaSAlexey Kardashevskiy nv2gpa, nv2atsd, errp); 4607ec132efaSAlexey Kardashevskiy *nv2gpa = 0; 4608ec132efaSAlexey Kardashevskiy *nv2atsd = 0; 4609ec132efaSAlexey Kardashevskiy } 4610ec132efaSAlexey Kardashevskiy 4611eb3cba82SDavid Gibson static void spapr_machine_4_0_class_options(MachineClass *mc) 4612eb3cba82SDavid Gibson { 4613eb3cba82SDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 4614eb3cba82SDavid Gibson 4615eb3cba82SDavid Gibson spapr_machine_4_1_class_options(mc); 4616eb3cba82SDavid Gibson compat_props_add(mc->compat_props, hw_compat_4_0, hw_compat_4_0_len); 
4617eb3cba82SDavid Gibson smc->phb_placement = phb_placement_4_0; 4618bd94bc06SCédric Le Goater smc->irq = &spapr_irq_xics; 46193725ef1aSGreg Kurz smc->pre_4_1_migration = true; 4620eb3cba82SDavid Gibson } 4621eb3cba82SDavid Gibson 4622eb3cba82SDavid Gibson DEFINE_SPAPR_MACHINE(4_0, "4.0", false); 4623eb3cba82SDavid Gibson 4624eb3cba82SDavid Gibson /* 4625eb3cba82SDavid Gibson * pseries-3.1 4626eb3cba82SDavid Gibson */ 462788cbe073SMarc-André Lureau static void spapr_machine_3_1_class_options(MachineClass *mc) 462888cbe073SMarc-André Lureau { 4629ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 4630fea35ca4SAlexey Kardashevskiy 463184e060bfSAlex Williamson spapr_machine_4_0_class_options(mc); 4632abd93cc7SMarc-André Lureau compat_props_add(mc->compat_props, hw_compat_3_1, hw_compat_3_1_len); 463327461d69SPrasad J Pandit 463434a6b015SCédric Le Goater mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power8_v2.0"); 4635fea35ca4SAlexey Kardashevskiy smc->update_dt_enabled = false; 4636dae5e39aSMichael Roth smc->dr_phb_enabled = false; 46370a794529SDavid Gibson smc->broken_host_serial_model = true; 46382782ad4cSSuraj Jitindar Singh smc->default_caps.caps[SPAPR_CAP_CFPC] = SPAPR_CAP_BROKEN; 46392782ad4cSSuraj Jitindar Singh smc->default_caps.caps[SPAPR_CAP_SBBC] = SPAPR_CAP_BROKEN; 46402782ad4cSSuraj Jitindar Singh smc->default_caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_BROKEN; 4641edaa7995SSuraj Jitindar Singh smc->default_caps.caps[SPAPR_CAP_LARGE_DECREMENTER] = SPAPR_CAP_OFF; 464284e060bfSAlex Williamson } 464384e060bfSAlex Williamson 464484e060bfSAlex Williamson DEFINE_SPAPR_MACHINE(3_1, "3.1", false); 4645d45360d9SCédric Le Goater 4646d45360d9SCédric Le Goater /* 4647d45360d9SCédric Le Goater * pseries-3.0 4648d45360d9SCédric Le Goater */ 4649d45360d9SCédric Le Goater 4650d45360d9SCédric Le Goater static void spapr_machine_3_0_class_options(MachineClass *mc) 4651d45360d9SCédric Le Goater { 4652ce2918cbSDavid Gibson SpaprMachineClass *smc = 
SPAPR_MACHINE_CLASS(mc); 465382cffa2eSCédric Le Goater 4654d45360d9SCédric Le Goater spapr_machine_3_1_class_options(mc); 4655ddb3235dSMarc-André Lureau compat_props_add(mc->compat_props, hw_compat_3_0, hw_compat_3_0_len); 465682cffa2eSCédric Le Goater 465782cffa2eSCédric Le Goater smc->legacy_irq_allocation = true; 465854255c1fSDavid Gibson smc->nr_xirqs = 0x400; 4659ae837402SCédric Le Goater smc->irq = &spapr_irq_xics_legacy; 4660d45360d9SCédric Le Goater } 4661d45360d9SCédric Le Goater 4662d45360d9SCédric Le Goater DEFINE_SPAPR_MACHINE(3_0, "3.0", false); 46638a4fd427SDavid Gibson 46648a4fd427SDavid Gibson /* 46658a4fd427SDavid Gibson * pseries-2.12 46668a4fd427SDavid Gibson */ 466788cbe073SMarc-André Lureau static void spapr_machine_2_12_class_options(MachineClass *mc) 466888cbe073SMarc-André Lureau { 4669ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 467088cbe073SMarc-André Lureau static GlobalProperty compat[] = { 46716c36bddfSEduardo Habkost { TYPE_POWERPC_CPU, "pre-3.0-migration", "on" }, 46726c36bddfSEduardo Habkost { TYPE_SPAPR_CPU_CORE, "pre-3.0-migration", "on" }, 4673fa386d98SMarc-André Lureau }; 46748a4fd427SDavid Gibson 4675d8c0c7afSPeter Maydell spapr_machine_3_0_class_options(mc); 46760d47310bSMarc-André Lureau compat_props_add(mc->compat_props, hw_compat_2_12, hw_compat_2_12_len); 467788cbe073SMarc-André Lureau compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); 46782309832aSDavid Gibson 4679e8937295SGreg Kurz /* We depend on kvm_enabled() to choose a default value for the 4680e8937295SGreg Kurz * hpt-max-page-size capability. Of course we can't do it here 4681e8937295SGreg Kurz * because this is too early and the HW accelerator isn't initialzed 4682e8937295SGreg Kurz * yet. Postpone this to machine init (see default_caps_with_cpu()). 
4683e8937295SGreg Kurz */ 4684e8937295SGreg Kurz smc->default_caps.caps[SPAPR_CAP_HPT_MAXPAGESIZE] = 0; 46858a4fd427SDavid Gibson } 46868a4fd427SDavid Gibson 46878a4fd427SDavid Gibson DEFINE_SPAPR_MACHINE(2_12, "2.12", false); 46882b615412SDavid Gibson 4689813f3cf6SSuraj Jitindar Singh static void spapr_machine_2_12_sxxm_class_options(MachineClass *mc) 4690813f3cf6SSuraj Jitindar Singh { 4691ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 4692813f3cf6SSuraj Jitindar Singh 4693813f3cf6SSuraj Jitindar Singh spapr_machine_2_12_class_options(mc); 4694813f3cf6SSuraj Jitindar Singh smc->default_caps.caps[SPAPR_CAP_CFPC] = SPAPR_CAP_WORKAROUND; 4695813f3cf6SSuraj Jitindar Singh smc->default_caps.caps[SPAPR_CAP_SBBC] = SPAPR_CAP_WORKAROUND; 4696813f3cf6SSuraj Jitindar Singh smc->default_caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_FIXED_CCD; 4697813f3cf6SSuraj Jitindar Singh } 4698813f3cf6SSuraj Jitindar Singh 4699813f3cf6SSuraj Jitindar Singh DEFINE_SPAPR_MACHINE(2_12_sxxm, "2.12-sxxm", false); 4700813f3cf6SSuraj Jitindar Singh 47012b615412SDavid Gibson /* 47022b615412SDavid Gibson * pseries-2.11 47032b615412SDavid Gibson */ 47042b615412SDavid Gibson 47052b615412SDavid Gibson static void spapr_machine_2_11_class_options(MachineClass *mc) 47062b615412SDavid Gibson { 4707ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 4708ee76a09fSDavid Gibson 47092b615412SDavid Gibson spapr_machine_2_12_class_options(mc); 47104e5fe368SSuraj Jitindar Singh smc->default_caps.caps[SPAPR_CAP_HTM] = SPAPR_CAP_ON; 471143df70a9SMarc-André Lureau compat_props_add(mc->compat_props, hw_compat_2_11, hw_compat_2_11_len); 47122b615412SDavid Gibson } 47132b615412SDavid Gibson 47142b615412SDavid Gibson DEFINE_SPAPR_MACHINE(2_11, "2.11", false); 4715e2676b16SGreg Kurz 4716e2676b16SGreg Kurz /* 47173fa14fbeSDavid Gibson * pseries-2.10 4718db800b21SDavid Gibson */ 4719e2676b16SGreg Kurz 47203fa14fbeSDavid Gibson static void 
spapr_machine_2_10_class_options(MachineClass *mc) 4721db800b21SDavid Gibson { 4722e2676b16SGreg Kurz spapr_machine_2_11_class_options(mc); 4723503224f4SMarc-André Lureau compat_props_add(mc->compat_props, hw_compat_2_10, hw_compat_2_10_len); 4724db800b21SDavid Gibson } 4725db800b21SDavid Gibson 4726e2676b16SGreg Kurz DEFINE_SPAPR_MACHINE(2_10, "2.10", false); 47273fa14fbeSDavid Gibson 47283fa14fbeSDavid Gibson /* 47293fa14fbeSDavid Gibson * pseries-2.9 47303fa14fbeSDavid Gibson */ 473188cbe073SMarc-André Lureau 473288cbe073SMarc-André Lureau static void spapr_machine_2_9_class_options(MachineClass *mc) 473388cbe073SMarc-André Lureau { 4734ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 473588cbe073SMarc-André Lureau static GlobalProperty compat[] = { 47366c36bddfSEduardo Habkost { TYPE_POWERPC_CPU, "pre-2.10-migration", "on" }, 4737fa386d98SMarc-André Lureau }; 47383fa14fbeSDavid Gibson 47393fa14fbeSDavid Gibson spapr_machine_2_10_class_options(mc); 47403e803152SMarc-André Lureau compat_props_add(mc->compat_props, hw_compat_2_9, hw_compat_2_9_len); 474188cbe073SMarc-André Lureau compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); 474246f7afa3SGreg Kurz smc->pre_2_10_has_unused_icps = true; 474352b81ab5SDavid Gibson smc->resize_hpt_default = SPAPR_RESIZE_HPT_DISABLED; 47443fa14fbeSDavid Gibson } 47453fa14fbeSDavid Gibson 47463fa14fbeSDavid Gibson DEFINE_SPAPR_MACHINE(2_9, "2.9", false); 4747fa325e6cSDavid Gibson 4748fa325e6cSDavid Gibson /* 4749fa325e6cSDavid Gibson * pseries-2.8 4750fa325e6cSDavid Gibson */ 475188cbe073SMarc-André Lureau 475288cbe073SMarc-André Lureau static void spapr_machine_2_8_class_options(MachineClass *mc) 475388cbe073SMarc-André Lureau { 475488cbe073SMarc-André Lureau static GlobalProperty compat[] = { 47556c36bddfSEduardo Habkost { TYPE_SPAPR_PCI_HOST_BRIDGE, "pcie-extended-configuration-space", "off" }, 4756fa386d98SMarc-André Lureau }; 4757fa325e6cSDavid Gibson 4758fa325e6cSDavid Gibson 
spapr_machine_2_9_class_options(mc); 4759edc24ccdSMarc-André Lureau compat_props_add(mc->compat_props, hw_compat_2_8, hw_compat_2_8_len); 476088cbe073SMarc-André Lureau compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); 476155641213SLaurent Vivier mc->numa_mem_align_shift = 23; 4762fa325e6cSDavid Gibson } 4763fa325e6cSDavid Gibson 4764fa325e6cSDavid Gibson DEFINE_SPAPR_MACHINE(2_8, "2.8", false); 4765db800b21SDavid Gibson 4766db800b21SDavid Gibson /* 47671ea1eefcSBharata B Rao * pseries-2.7 47681ea1eefcSBharata B Rao */ 4769357d1e3bSDavid Gibson 4770ce2918cbSDavid Gibson static void phb_placement_2_7(SpaprMachineState *spapr, uint32_t index, 4771357d1e3bSDavid Gibson uint64_t *buid, hwaddr *pio, 4772357d1e3bSDavid Gibson hwaddr *mmio32, hwaddr *mmio64, 4773ec132efaSAlexey Kardashevskiy unsigned n_dma, uint32_t *liobns, 4774ec132efaSAlexey Kardashevskiy hwaddr *nv2gpa, hwaddr *nv2atsd, Error **errp) 4775357d1e3bSDavid Gibson { 4776357d1e3bSDavid Gibson /* Legacy PHB placement for pseries-2.7 and earlier machine types */ 4777357d1e3bSDavid Gibson const uint64_t base_buid = 0x800000020000000ULL; 4778357d1e3bSDavid Gibson const hwaddr phb_spacing = 0x1000000000ULL; /* 64 GiB */ 4779357d1e3bSDavid Gibson const hwaddr mmio_offset = 0xa0000000; /* 2 GiB + 512 MiB */ 4780357d1e3bSDavid Gibson const hwaddr pio_offset = 0x80000000; /* 2 GiB */ 4781357d1e3bSDavid Gibson const uint32_t max_index = 255; 4782357d1e3bSDavid Gibson const hwaddr phb0_alignment = 0x10000000000ULL; /* 1 TiB */ 4783357d1e3bSDavid Gibson 4784357d1e3bSDavid Gibson uint64_t ram_top = MACHINE(spapr)->ram_size; 4785357d1e3bSDavid Gibson hwaddr phb0_base, phb_base; 4786357d1e3bSDavid Gibson int i; 4787357d1e3bSDavid Gibson 47880c9269a5SDavid Hildenbrand /* Do we have device memory? 
*/ 4789357d1e3bSDavid Gibson if (MACHINE(spapr)->maxram_size > ram_top) { 4790357d1e3bSDavid Gibson /* Can't just use maxram_size, because there may be an 47910c9269a5SDavid Hildenbrand * alignment gap between normal and device memory regions 47920c9269a5SDavid Hildenbrand */ 4793b0c14ec4SDavid Hildenbrand ram_top = MACHINE(spapr)->device_memory->base + 4794b0c14ec4SDavid Hildenbrand memory_region_size(&MACHINE(spapr)->device_memory->mr); 4795357d1e3bSDavid Gibson } 4796357d1e3bSDavid Gibson 4797357d1e3bSDavid Gibson phb0_base = QEMU_ALIGN_UP(ram_top, phb0_alignment); 4798357d1e3bSDavid Gibson 4799357d1e3bSDavid Gibson if (index > max_index) { 4800357d1e3bSDavid Gibson error_setg(errp, "\"index\" for PAPR PHB is too large (max %u)", 4801357d1e3bSDavid Gibson max_index); 4802357d1e3bSDavid Gibson return; 4803357d1e3bSDavid Gibson } 4804357d1e3bSDavid Gibson 4805357d1e3bSDavid Gibson *buid = base_buid + index; 4806357d1e3bSDavid Gibson for (i = 0; i < n_dma; ++i) { 4807357d1e3bSDavid Gibson liobns[i] = SPAPR_PCI_LIOBN(index, i); 4808357d1e3bSDavid Gibson } 4809357d1e3bSDavid Gibson 4810357d1e3bSDavid Gibson phb_base = phb0_base + index * phb_spacing; 4811357d1e3bSDavid Gibson *pio = phb_base + pio_offset; 4812357d1e3bSDavid Gibson *mmio32 = phb_base + mmio_offset; 4813357d1e3bSDavid Gibson /* 4814357d1e3bSDavid Gibson * We don't set the 64-bit MMIO window, relying on the PHB's 4815357d1e3bSDavid Gibson * fallback behaviour of automatically splitting a large "32-bit" 4816357d1e3bSDavid Gibson * window into contiguous 32-bit and 64-bit windows 4817357d1e3bSDavid Gibson */ 4818ec132efaSAlexey Kardashevskiy 4819ec132efaSAlexey Kardashevskiy *nv2gpa = 0; 4820ec132efaSAlexey Kardashevskiy *nv2atsd = 0; 4821357d1e3bSDavid Gibson } 4822db800b21SDavid Gibson 48231ea1eefcSBharata B Rao static void spapr_machine_2_7_class_options(MachineClass *mc) 48241ea1eefcSBharata B Rao { 4825ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 482688cbe073SMarc-André 
Lureau static GlobalProperty compat[] = { 48276c36bddfSEduardo Habkost { TYPE_SPAPR_PCI_HOST_BRIDGE, "mem_win_size", "0xf80000000", }, 48286c36bddfSEduardo Habkost { TYPE_SPAPR_PCI_HOST_BRIDGE, "mem64_win_size", "0", }, 48296c36bddfSEduardo Habkost { TYPE_POWERPC_CPU, "pre-2.8-migration", "on", }, 48306c36bddfSEduardo Habkost { TYPE_SPAPR_PCI_HOST_BRIDGE, "pre-2.8-migration", "on", }, 483188cbe073SMarc-André Lureau }; 48323daa4a9fSThomas Huth 4833db800b21SDavid Gibson spapr_machine_2_8_class_options(mc); 48342e9c10ebSIgor Mammedov mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power7_v2.3"); 4835a140c199SEduardo Habkost mc->default_machine_opts = "modern-hotplug-events=off"; 48365a995064SMarc-André Lureau compat_props_add(mc->compat_props, hw_compat_2_7, hw_compat_2_7_len); 483788cbe073SMarc-André Lureau compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); 4838357d1e3bSDavid Gibson smc->phb_placement = phb_placement_2_7; 48391ea1eefcSBharata B Rao } 48401ea1eefcSBharata B Rao 4841db800b21SDavid Gibson DEFINE_SPAPR_MACHINE(2_7, "2.7", false); 48421ea1eefcSBharata B Rao 48431ea1eefcSBharata B Rao /* 48444b23699cSDavid Gibson * pseries-2.6 48454b23699cSDavid Gibson */ 484688cbe073SMarc-André Lureau 484788cbe073SMarc-André Lureau static void spapr_machine_2_6_class_options(MachineClass *mc) 484888cbe073SMarc-André Lureau { 484988cbe073SMarc-André Lureau static GlobalProperty compat[] = { 48506c36bddfSEduardo Habkost { TYPE_SPAPR_PCI_HOST_BRIDGE, "ddw", "off" }, 4851fa386d98SMarc-André Lureau }; 48521ea1eefcSBharata B Rao 48531ea1eefcSBharata B Rao spapr_machine_2_7_class_options(mc); 4854c5514d0eSIgor Mammedov mc->has_hotpluggable_cpus = false; 4855ff8f261fSMarc-André Lureau compat_props_add(mc->compat_props, hw_compat_2_6, hw_compat_2_6_len); 485688cbe073SMarc-André Lureau compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); 48574b23699cSDavid Gibson } 48584b23699cSDavid Gibson 48591ea1eefcSBharata B Rao DEFINE_SPAPR_MACHINE(2_6, "2.6", 
false); 48604b23699cSDavid Gibson 48614b23699cSDavid Gibson /* 48621c5f29bbSDavid Gibson * pseries-2.5 48631c5f29bbSDavid Gibson */ 486488cbe073SMarc-André Lureau 486588cbe073SMarc-André Lureau static void spapr_machine_2_5_class_options(MachineClass *mc) 486688cbe073SMarc-André Lureau { 4867ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 486888cbe073SMarc-André Lureau static GlobalProperty compat[] = { 48696c36bddfSEduardo Habkost { "spapr-vlan", "use-rx-buffer-pools", "off" }, 4870fa386d98SMarc-André Lureau }; 48714b23699cSDavid Gibson 48724b23699cSDavid Gibson spapr_machine_2_6_class_options(mc); 487357040d45SThomas Huth smc->use_ohci_by_default = true; 4874fe759610SMarc-André Lureau compat_props_add(mc->compat_props, hw_compat_2_5, hw_compat_2_5_len); 487588cbe073SMarc-André Lureau compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); 48761c5f29bbSDavid Gibson } 48771c5f29bbSDavid Gibson 48784b23699cSDavid Gibson DEFINE_SPAPR_MACHINE(2_5, "2.5", false); 48791c5f29bbSDavid Gibson 48801c5f29bbSDavid Gibson /* 48811c5f29bbSDavid Gibson * pseries-2.4 48821c5f29bbSDavid Gibson */ 488380fd50f9SCornelia Huck 48845013c547SDavid Gibson static void spapr_machine_2_4_class_options(MachineClass *mc) 48855013c547SDavid Gibson { 4886ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 4887fc9f38c3SDavid Gibson 4888fc9f38c3SDavid Gibson spapr_machine_2_5_class_options(mc); 4889fc9f38c3SDavid Gibson smc->dr_lmb_enabled = false; 48902f99b9c2SMarc-André Lureau compat_props_add(mc->compat_props, hw_compat_2_4, hw_compat_2_4_len); 48911c5f29bbSDavid Gibson } 48921c5f29bbSDavid Gibson 4893fccbc785SDavid Gibson DEFINE_SPAPR_MACHINE(2_4, "2.4", false); 48941c5f29bbSDavid Gibson 48951c5f29bbSDavid Gibson /* 48961c5f29bbSDavid Gibson * pseries-2.3 48971c5f29bbSDavid Gibson */ 489888cbe073SMarc-André Lureau 489988cbe073SMarc-André Lureau static void spapr_machine_2_3_class_options(MachineClass *mc) 490088cbe073SMarc-André Lureau { 
490188cbe073SMarc-André Lureau static GlobalProperty compat[] = { 49026c36bddfSEduardo Habkost { "spapr-pci-host-bridge", "dynamic-reconfiguration", "off" }, 4903fa386d98SMarc-André Lureau }; 4904fc9f38c3SDavid Gibson spapr_machine_2_4_class_options(mc); 49058995dd90SMarc-André Lureau compat_props_add(mc->compat_props, hw_compat_2_3, hw_compat_2_3_len); 490688cbe073SMarc-André Lureau compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); 49071c5f29bbSDavid Gibson } 4908fccbc785SDavid Gibson DEFINE_SPAPR_MACHINE(2_3, "2.3", false); 49091c5f29bbSDavid Gibson 49101c5f29bbSDavid Gibson /* 49111c5f29bbSDavid Gibson * pseries-2.2 49121c5f29bbSDavid Gibson */ 491388cbe073SMarc-André Lureau 491488cbe073SMarc-André Lureau static void spapr_machine_2_2_class_options(MachineClass *mc) 491588cbe073SMarc-André Lureau { 491688cbe073SMarc-André Lureau static GlobalProperty compat[] = { 49176c36bddfSEduardo Habkost { TYPE_SPAPR_PCI_HOST_BRIDGE, "mem_win_size", "0x20000000" }, 4918fa386d98SMarc-André Lureau }; 4919b194df47SAlexey Kardashevskiy 4920fc9f38c3SDavid Gibson spapr_machine_2_3_class_options(mc); 49211c30044eSMarc-André Lureau compat_props_add(mc->compat_props, hw_compat_2_2, hw_compat_2_2_len); 492288cbe073SMarc-André Lureau compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); 4923f6d0656bSEduardo Habkost mc->default_machine_opts = "modern-hotplug-events=off,suppress-vmdesc=on"; 49241c5f29bbSDavid Gibson } 4925fccbc785SDavid Gibson DEFINE_SPAPR_MACHINE(2_2, "2.2", false); 49261c5f29bbSDavid Gibson 49271c5f29bbSDavid Gibson /* 49281c5f29bbSDavid Gibson * pseries-2.1 49291c5f29bbSDavid Gibson */ 49301c5f29bbSDavid Gibson 49315013c547SDavid Gibson static void spapr_machine_2_1_class_options(MachineClass *mc) 4932b0e966d0SJason Wang { 4933fc9f38c3SDavid Gibson spapr_machine_2_2_class_options(mc); 4934c4fc5695SMarc-André Lureau compat_props_add(mc->compat_props, hw_compat_2_1, hw_compat_2_1_len); 49356026db45SAlexey Kardashevskiy } 4936fccbc785SDavid 
Gibson DEFINE_SPAPR_MACHINE(2_1, "2.1", false); 49376026db45SAlexey Kardashevskiy 493829ee3247SAlexey Kardashevskiy static void spapr_machine_register_types(void) 493929ee3247SAlexey Kardashevskiy { 494029ee3247SAlexey Kardashevskiy type_register_static(&spapr_machine_info); 494129ee3247SAlexey Kardashevskiy } 494229ee3247SAlexey Kardashevskiy 494329ee3247SAlexey Kardashevskiy type_init(spapr_machine_register_types) 4944