153018216SPaolo Bonzini /* 253018216SPaolo Bonzini * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator 353018216SPaolo Bonzini * 453018216SPaolo Bonzini * Copyright (c) 2004-2007 Fabrice Bellard 553018216SPaolo Bonzini * Copyright (c) 2007 Jocelyn Mayer 653018216SPaolo Bonzini * Copyright (c) 2010 David Gibson, IBM Corporation. 753018216SPaolo Bonzini * 853018216SPaolo Bonzini * Permission is hereby granted, free of charge, to any person obtaining a copy 953018216SPaolo Bonzini * of this software and associated documentation files (the "Software"), to deal 1053018216SPaolo Bonzini * in the Software without restriction, including without limitation the rights 1153018216SPaolo Bonzini * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 1253018216SPaolo Bonzini * copies of the Software, and to permit persons to whom the Software is 1353018216SPaolo Bonzini * furnished to do so, subject to the following conditions: 1453018216SPaolo Bonzini * 1553018216SPaolo Bonzini * The above copyright notice and this permission notice shall be included in 1653018216SPaolo Bonzini * all copies or substantial portions of the Software. 1753018216SPaolo Bonzini * 1853018216SPaolo Bonzini * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 1953018216SPaolo Bonzini * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 2053018216SPaolo Bonzini * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 2153018216SPaolo Bonzini * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 2253018216SPaolo Bonzini * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 2353018216SPaolo Bonzini * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 2453018216SPaolo Bonzini * THE SOFTWARE. 
2553018216SPaolo Bonzini * 2653018216SPaolo Bonzini */ 270d75590dSPeter Maydell #include "qemu/osdep.h" 28da34e65cSMarkus Armbruster #include "qapi/error.h" 29fa98fbfcSSam Bobroff #include "qapi/visitor.h" 3053018216SPaolo Bonzini #include "sysemu/sysemu.h" 31e35704baSEduardo Habkost #include "sysemu/numa.h" 3253018216SPaolo Bonzini #include "hw/hw.h" 3303dd024fSPaolo Bonzini #include "qemu/log.h" 3471461b0fSAlexey Kardashevskiy #include "hw/fw-path-provider.h" 3553018216SPaolo Bonzini #include "elf.h" 3653018216SPaolo Bonzini #include "net/net.h" 37ad440b4aSAndrew Jones #include "sysemu/device_tree.h" 38fa1d36dfSMarkus Armbruster #include "sysemu/block-backend.h" 3953018216SPaolo Bonzini #include "sysemu/cpus.h" 40b3946626SVincent Palatin #include "sysemu/hw_accel.h" 4153018216SPaolo Bonzini #include "kvm_ppc.h" 42c4b63b7cSJuan Quintela #include "migration/misc.h" 4384a899deSJuan Quintela #include "migration/global_state.h" 44f2a8f0a6SJuan Quintela #include "migration/register.h" 454be21d56SDavid Gibson #include "mmu-hash64.h" 46b4db5413SSuraj Jitindar Singh #include "mmu-book3s-v3.h" 477abd43baSSuraj Jitindar Singh #include "cpu-models.h" 483794d548SAlexey Kardashevskiy #include "qom/cpu.h" 4953018216SPaolo Bonzini 5053018216SPaolo Bonzini #include "hw/boards.h" 510d09e41aSPaolo Bonzini #include "hw/ppc/ppc.h" 5253018216SPaolo Bonzini #include "hw/loader.h" 5353018216SPaolo Bonzini 547804c353SCédric Le Goater #include "hw/ppc/fdt.h" 550d09e41aSPaolo Bonzini #include "hw/ppc/spapr.h" 560d09e41aSPaolo Bonzini #include "hw/ppc/spapr_vio.h" 570d09e41aSPaolo Bonzini #include "hw/pci-host/spapr.h" 580d09e41aSPaolo Bonzini #include "hw/ppc/xics.h" 5953018216SPaolo Bonzini #include "hw/pci/msi.h" 6053018216SPaolo Bonzini 6153018216SPaolo Bonzini #include "hw/pci/pci.h" 6271461b0fSAlexey Kardashevskiy #include "hw/scsi/scsi.h" 6371461b0fSAlexey Kardashevskiy #include "hw/virtio/virtio-scsi.h" 64c4e13492SFelipe Franciosi #include "hw/virtio/vhost-scsi-common.h" 
6553018216SPaolo Bonzini 6653018216SPaolo Bonzini #include "exec/address-spaces.h" 6753018216SPaolo Bonzini #include "hw/usb.h" 6853018216SPaolo Bonzini #include "qemu/config-file.h" 69135a129aSAneesh Kumar K.V #include "qemu/error-report.h" 702a6593cbSAlexey Kardashevskiy #include "trace.h" 7134316482SAlexey Kardashevskiy #include "hw/nmi.h" 726449da45SCédric Le Goater #include "hw/intc/intc.h" 7353018216SPaolo Bonzini 7468a27b20SMichael S. Tsirkin #include "hw/compat.h" 75f348b6d1SVeronia Bahaa #include "qemu/cutils.h" 7694a94e4cSBharata B Rao #include "hw/ppc/spapr_cpu_core.h" 772474bfd4SIgor Mammedov #include "qmp-commands.h" 7868a27b20SMichael S. Tsirkin 7953018216SPaolo Bonzini #include <libfdt.h> 8053018216SPaolo Bonzini 8153018216SPaolo Bonzini /* SLOF memory layout: 8253018216SPaolo Bonzini * 8353018216SPaolo Bonzini * SLOF raw image loaded at 0, copies its romfs right below the flat 8453018216SPaolo Bonzini * device-tree, then position SLOF itself 31M below that 8553018216SPaolo Bonzini * 8653018216SPaolo Bonzini * So we set FW_OVERHEAD to 40MB which should account for all of that 8753018216SPaolo Bonzini * and more 8853018216SPaolo Bonzini * 8953018216SPaolo Bonzini * We load our kernel at 4M, leaving space for SLOF initial image 9053018216SPaolo Bonzini */ 9138b02bd8SAlexey Kardashevskiy #define FDT_MAX_SIZE 0x100000 9253018216SPaolo Bonzini #define RTAS_MAX_SIZE 0x10000 93b7d1f77aSBenjamin Herrenschmidt #define RTAS_MAX_ADDR 0x80000000 /* RTAS must stay below that */ 9453018216SPaolo Bonzini #define FW_MAX_SIZE 0x400000 9553018216SPaolo Bonzini #define FW_FILE_NAME "slof.bin" 9653018216SPaolo Bonzini #define FW_OVERHEAD 0x2800000 9753018216SPaolo Bonzini #define KERNEL_LOAD_ADDR FW_MAX_SIZE 9853018216SPaolo Bonzini 9953018216SPaolo Bonzini #define MIN_RMA_SLOF 128UL 10053018216SPaolo Bonzini 10153018216SPaolo Bonzini #define PHANDLE_XICP 0x00001111 10253018216SPaolo Bonzini 10371cd4dacSCédric Le Goater static ICSState 
*spapr_ics_create(sPAPRMachineState *spapr, 10471cd4dacSCédric Le Goater const char *type_ics, 105817bb6a4SCédric Le Goater int nr_irqs, Error **errp) 106c04d6cfaSAnthony Liguori { 107175d2aa0SGreg Kurz Error *local_err = NULL; 10871cd4dacSCédric Le Goater Object *obj; 109c04d6cfaSAnthony Liguori 11071cd4dacSCédric Le Goater obj = object_new(type_ics); 111175d2aa0SGreg Kurz object_property_add_child(OBJECT(spapr), "ics", obj, &error_abort); 112ad265631SGreg Kurz object_property_add_const_link(obj, ICS_PROP_XICS, OBJECT(spapr), 113ad265631SGreg Kurz &error_abort); 114175d2aa0SGreg Kurz object_property_set_int(obj, nr_irqs, "nr-irqs", &local_err); 115175d2aa0SGreg Kurz if (local_err) { 116175d2aa0SGreg Kurz goto error; 117175d2aa0SGreg Kurz } 11871cd4dacSCédric Le Goater object_property_set_bool(obj, true, "realized", &local_err); 119175d2aa0SGreg Kurz if (local_err) { 120175d2aa0SGreg Kurz goto error; 121c04d6cfaSAnthony Liguori } 122c04d6cfaSAnthony Liguori 12371cd4dacSCédric Le Goater return ICS_SIMPLE(obj); 124175d2aa0SGreg Kurz 125175d2aa0SGreg Kurz error: 126175d2aa0SGreg Kurz error_propagate(errp, local_err); 127175d2aa0SGreg Kurz return NULL; 1285bc8d26dSCédric Le Goater } 1295bc8d26dSCédric Le Goater 13046f7afa3SGreg Kurz static bool pre_2_10_vmstate_dummy_icp_needed(void *opaque) 13146f7afa3SGreg Kurz { 13246f7afa3SGreg Kurz /* Dummy entries correspond to unused ICPState objects in older QEMUs, 13346f7afa3SGreg Kurz * and newer QEMUs don't even have them. In both cases, we don't want 13446f7afa3SGreg Kurz * to send anything on the wire. 
13546f7afa3SGreg Kurz */ 13646f7afa3SGreg Kurz return false; 13746f7afa3SGreg Kurz } 13846f7afa3SGreg Kurz 13946f7afa3SGreg Kurz static const VMStateDescription pre_2_10_vmstate_dummy_icp = { 14046f7afa3SGreg Kurz .name = "icp/server", 14146f7afa3SGreg Kurz .version_id = 1, 14246f7afa3SGreg Kurz .minimum_version_id = 1, 14346f7afa3SGreg Kurz .needed = pre_2_10_vmstate_dummy_icp_needed, 14446f7afa3SGreg Kurz .fields = (VMStateField[]) { 14546f7afa3SGreg Kurz VMSTATE_UNUSED(4), /* uint32_t xirr */ 14646f7afa3SGreg Kurz VMSTATE_UNUSED(1), /* uint8_t pending_priority */ 14746f7afa3SGreg Kurz VMSTATE_UNUSED(1), /* uint8_t mfrr */ 14846f7afa3SGreg Kurz VMSTATE_END_OF_LIST() 14946f7afa3SGreg Kurz }, 15046f7afa3SGreg Kurz }; 15146f7afa3SGreg Kurz 15246f7afa3SGreg Kurz static void pre_2_10_vmstate_register_dummy_icp(int i) 15346f7afa3SGreg Kurz { 15446f7afa3SGreg Kurz vmstate_register(NULL, i, &pre_2_10_vmstate_dummy_icp, 15546f7afa3SGreg Kurz (void *)(uintptr_t) i); 15646f7afa3SGreg Kurz } 15746f7afa3SGreg Kurz 15846f7afa3SGreg Kurz static void pre_2_10_vmstate_unregister_dummy_icp(int i) 15946f7afa3SGreg Kurz { 16046f7afa3SGreg Kurz vmstate_unregister(NULL, &pre_2_10_vmstate_dummy_icp, 16146f7afa3SGreg Kurz (void *)(uintptr_t) i); 16246f7afa3SGreg Kurz } 16346f7afa3SGreg Kurz 16446f7afa3SGreg Kurz static inline int xics_max_server_number(void) 16546f7afa3SGreg Kurz { 16646f7afa3SGreg Kurz return DIV_ROUND_UP(max_cpus * kvmppc_smt_threads(), smp_threads); 16746f7afa3SGreg Kurz } 16846f7afa3SGreg Kurz 16971cd4dacSCédric Le Goater static void xics_system_init(MachineState *machine, int nr_irqs, Error **errp) 170c04d6cfaSAnthony Liguori { 17171cd4dacSCédric Le Goater sPAPRMachineState *spapr = SPAPR_MACHINE(machine); 17246f7afa3SGreg Kurz sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine); 173c04d6cfaSAnthony Liguori 17411ad93f6SDavid Gibson if (kvm_enabled()) { 1752192a930SCédric Le Goater if (machine_kernel_irqchip_allowed(machine) && 17671cd4dacSCédric Le Goater 
!xics_kvm_init(spapr, errp)) { 17771cd4dacSCédric Le Goater spapr->icp_type = TYPE_KVM_ICP; 1783d85885aSGreg Kurz spapr->ics = spapr_ics_create(spapr, TYPE_ICS_KVM, nr_irqs, errp); 17911ad93f6SDavid Gibson } 18071cd4dacSCédric Le Goater if (machine_kernel_irqchip_required(machine) && !spapr->ics) { 1813d85885aSGreg Kurz error_prepend(errp, "kernel_irqchip requested but unavailable: "); 1823d85885aSGreg Kurz return; 18311ad93f6SDavid Gibson } 184b83baa60SMarkus Armbruster } 18511ad93f6SDavid Gibson 18671cd4dacSCédric Le Goater if (!spapr->ics) { 187f63ebfe0SGreg Kurz xics_spapr_init(spapr); 18871cd4dacSCédric Le Goater spapr->icp_type = TYPE_ICP; 18971cd4dacSCédric Le Goater spapr->ics = spapr_ics_create(spapr, TYPE_ICS_SIMPLE, nr_irqs, errp); 1903d85885aSGreg Kurz if (!spapr->ics) { 1913d85885aSGreg Kurz return; 1923d85885aSGreg Kurz } 193c04d6cfaSAnthony Liguori } 19446f7afa3SGreg Kurz 19546f7afa3SGreg Kurz if (smc->pre_2_10_has_unused_icps) { 19646f7afa3SGreg Kurz int i; 19746f7afa3SGreg Kurz 19846f7afa3SGreg Kurz for (i = 0; i < xics_max_server_number(); i++) { 19946f7afa3SGreg Kurz /* Dummy entries get deregistered when real ICPState objects 20046f7afa3SGreg Kurz * are registered during CPU core hotplug. 
20146f7afa3SGreg Kurz */ 20246f7afa3SGreg Kurz pre_2_10_vmstate_register_dummy_icp(i); 20346f7afa3SGreg Kurz } 20446f7afa3SGreg Kurz } 205c04d6cfaSAnthony Liguori } 206c04d6cfaSAnthony Liguori 207833d4668SAlexey Kardashevskiy static int spapr_fixup_cpu_smt_dt(void *fdt, int offset, PowerPCCPU *cpu, 208833d4668SAlexey Kardashevskiy int smt_threads) 209833d4668SAlexey Kardashevskiy { 210833d4668SAlexey Kardashevskiy int i, ret = 0; 211833d4668SAlexey Kardashevskiy uint32_t servers_prop[smt_threads]; 212833d4668SAlexey Kardashevskiy uint32_t gservers_prop[smt_threads * 2]; 2132e886fb3SSam Bobroff int index = spapr_vcpu_id(cpu); 214833d4668SAlexey Kardashevskiy 215d6e166c0SDavid Gibson if (cpu->compat_pvr) { 216d6e166c0SDavid Gibson ret = fdt_setprop_cell(fdt, offset, "cpu-version", cpu->compat_pvr); 2176d9412eaSAlexey Kardashevskiy if (ret < 0) { 2186d9412eaSAlexey Kardashevskiy return ret; 2196d9412eaSAlexey Kardashevskiy } 2206d9412eaSAlexey Kardashevskiy } 2216d9412eaSAlexey Kardashevskiy 222833d4668SAlexey Kardashevskiy /* Build interrupt servers and gservers properties */ 223833d4668SAlexey Kardashevskiy for (i = 0; i < smt_threads; i++) { 224833d4668SAlexey Kardashevskiy servers_prop[i] = cpu_to_be32(index + i); 225833d4668SAlexey Kardashevskiy /* Hack, direct the group queues back to cpu 0 */ 226833d4668SAlexey Kardashevskiy gservers_prop[i*2] = cpu_to_be32(index + i); 227833d4668SAlexey Kardashevskiy gservers_prop[i*2 + 1] = 0; 228833d4668SAlexey Kardashevskiy } 229833d4668SAlexey Kardashevskiy ret = fdt_setprop(fdt, offset, "ibm,ppc-interrupt-server#s", 230833d4668SAlexey Kardashevskiy servers_prop, sizeof(servers_prop)); 231833d4668SAlexey Kardashevskiy if (ret < 0) { 232833d4668SAlexey Kardashevskiy return ret; 233833d4668SAlexey Kardashevskiy } 234833d4668SAlexey Kardashevskiy ret = fdt_setprop(fdt, offset, "ibm,ppc-interrupt-gserver#s", 235833d4668SAlexey Kardashevskiy gservers_prop, sizeof(gservers_prop)); 236833d4668SAlexey Kardashevskiy 
237833d4668SAlexey Kardashevskiy return ret; 238833d4668SAlexey Kardashevskiy } 239833d4668SAlexey Kardashevskiy 24099861ecbSIgor Mammedov static int spapr_fixup_cpu_numa_dt(void *fdt, int offset, PowerPCCPU *cpu) 2410da6f3feSBharata B Rao { 2422e886fb3SSam Bobroff int index = spapr_vcpu_id(cpu); 2430da6f3feSBharata B Rao uint32_t associativity[] = {cpu_to_be32(0x5), 2440da6f3feSBharata B Rao cpu_to_be32(0x0), 2450da6f3feSBharata B Rao cpu_to_be32(0x0), 2460da6f3feSBharata B Rao cpu_to_be32(0x0), 24715f8b142SIgor Mammedov cpu_to_be32(cpu->node_id), 2480da6f3feSBharata B Rao cpu_to_be32(index)}; 2490da6f3feSBharata B Rao 2500da6f3feSBharata B Rao /* Advertise NUMA via ibm,associativity */ 25199861ecbSIgor Mammedov return fdt_setprop(fdt, offset, "ibm,associativity", associativity, 2520da6f3feSBharata B Rao sizeof(associativity)); 2530da6f3feSBharata B Rao } 2540da6f3feSBharata B Rao 25586d5771aSSam Bobroff /* Populate the "ibm,pa-features" property */ 256*ee76a09fSDavid Gibson static void spapr_populate_pa_features(sPAPRMachineState *spapr, 257*ee76a09fSDavid Gibson PowerPCCPU *cpu, 258*ee76a09fSDavid Gibson void *fdt, int offset, 259e957f6a9SSam Bobroff bool legacy_guest) 26086d5771aSSam Bobroff { 2617abd43baSSuraj Jitindar Singh CPUPPCState *env = &cpu->env; 26286d5771aSSam Bobroff uint8_t pa_features_206[] = { 6, 0, 26386d5771aSSam Bobroff 0xf6, 0x1f, 0xc7, 0x00, 0x80, 0xc0 }; 26486d5771aSSam Bobroff uint8_t pa_features_207[] = { 24, 0, 26586d5771aSSam Bobroff 0xf6, 0x1f, 0xc7, 0xc0, 0x80, 0xf0, 26686d5771aSSam Bobroff 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 26786d5771aSSam Bobroff 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 26886d5771aSSam Bobroff 0x80, 0x00, 0x80, 0x00, 0x00, 0x00 }; 2699fb4541fSSam Bobroff uint8_t pa_features_300[] = { 66, 0, 2709fb4541fSSam Bobroff /* 0: MMU|FPU|SLB|RUN|DABR|NX, 1: fri[nzpm]|DABRX|SPRG3|SLB0|PP110 */ 2719fb4541fSSam Bobroff /* 2: VPM|DS205|PPR|DS202|DS206, 3: LSD|URG, SSO, 5: LE|CFAR|EB|LSQ */ 27286d5771aSSam Bobroff 0xf6, 0x1f, 0xc7, 
0xc0, 0x80, 0xf0, /* 0 - 5 */ 2739fb4541fSSam Bobroff /* 6: DS207 */ 27486d5771aSSam Bobroff 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, /* 6 - 11 */ 2759fb4541fSSam Bobroff /* 16: Vector */ 27686d5771aSSam Bobroff 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, /* 12 - 17 */ 2779fb4541fSSam Bobroff /* 18: Vec. Scalar, 20: Vec. XOR, 22: HTM */ 2789bf502feSDavid Gibson 0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 18 - 23 */ 2799fb4541fSSam Bobroff /* 24: Ext. Dec, 26: 64 bit ftrs, 28: PM ftrs */ 2809fb4541fSSam Bobroff 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 24 - 29 */ 2819fb4541fSSam Bobroff /* 30: MMR, 32: LE atomic, 34: EBB + ext EBB */ 2829fb4541fSSam Bobroff 0x80, 0x00, 0x80, 0x00, 0xC0, 0x00, /* 30 - 35 */ 2839fb4541fSSam Bobroff /* 36: SPR SO, 38: Copy/Paste, 40: Radix MMU */ 2849fb4541fSSam Bobroff 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 36 - 41 */ 2859fb4541fSSam Bobroff /* 42: PM, 44: PC RA, 46: SC vec'd */ 2869fb4541fSSam Bobroff 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 42 - 47 */ 2879fb4541fSSam Bobroff /* 48: SIMD, 50: QP BFP, 52: String */ 2889fb4541fSSam Bobroff 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 48 - 53 */ 2899fb4541fSSam Bobroff /* 54: DecFP, 56: DecI, 58: SHA */ 2909fb4541fSSam Bobroff 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 54 - 59 */ 2919fb4541fSSam Bobroff /* 60: NM atomic, 62: RNG */ 2929fb4541fSSam Bobroff 0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 60 - 65 */ 2939fb4541fSSam Bobroff }; 2947abd43baSSuraj Jitindar Singh uint8_t *pa_features = NULL; 29586d5771aSSam Bobroff size_t pa_size; 29686d5771aSSam Bobroff 2977abd43baSSuraj Jitindar Singh if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_2_06, 0, cpu->compat_pvr)) { 29886d5771aSSam Bobroff pa_features = pa_features_206; 29986d5771aSSam Bobroff pa_size = sizeof(pa_features_206); 3007abd43baSSuraj Jitindar Singh } 3017abd43baSSuraj Jitindar Singh if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_2_07, 0, cpu->compat_pvr)) { 30286d5771aSSam Bobroff pa_features = pa_features_207; 30386d5771aSSam Bobroff pa_size = 
sizeof(pa_features_207); 3047abd43baSSuraj Jitindar Singh } 3057abd43baSSuraj Jitindar Singh if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_00, 0, cpu->compat_pvr)) { 30686d5771aSSam Bobroff pa_features = pa_features_300; 30786d5771aSSam Bobroff pa_size = sizeof(pa_features_300); 3087abd43baSSuraj Jitindar Singh } 3097abd43baSSuraj Jitindar Singh if (!pa_features) { 31086d5771aSSam Bobroff return; 31186d5771aSSam Bobroff } 31286d5771aSSam Bobroff 31386d5771aSSam Bobroff if (env->ci_large_pages) { 31486d5771aSSam Bobroff /* 31586d5771aSSam Bobroff * Note: we keep CI large pages off by default because a 64K capable 31686d5771aSSam Bobroff * guest provisioned with large pages might otherwise try to map a qemu 31786d5771aSSam Bobroff * framebuffer (or other kind of memory mapped PCI BAR) using 64K pages 31886d5771aSSam Bobroff * even if that qemu runs on a 4k host. 31986d5771aSSam Bobroff * We dd this bit back here if we are confident this is not an issue 32086d5771aSSam Bobroff */ 32186d5771aSSam Bobroff pa_features[3] |= 0x20; 32286d5771aSSam Bobroff } 323*ee76a09fSDavid Gibson if (spapr_has_cap(spapr, SPAPR_CAP_HTM) && pa_size > 24) { 32486d5771aSSam Bobroff pa_features[24] |= 0x80; /* Transactional memory support */ 32586d5771aSSam Bobroff } 326e957f6a9SSam Bobroff if (legacy_guest && pa_size > 40) { 327e957f6a9SSam Bobroff /* Workaround for broken kernels that attempt (guest) radix 328e957f6a9SSam Bobroff * mode when they can't handle it, if they see the radix bit set 329e957f6a9SSam Bobroff * in pa-features. So hide it from them. 
*/ 330e957f6a9SSam Bobroff pa_features[40 + 2] &= ~0x80; /* Radix MMU */ 331e957f6a9SSam Bobroff } 33286d5771aSSam Bobroff 33386d5771aSSam Bobroff _FDT((fdt_setprop(fdt, offset, "ibm,pa-features", pa_features, pa_size))); 33486d5771aSSam Bobroff } 33586d5771aSSam Bobroff 33628e02042SDavid Gibson static int spapr_fixup_cpu_dt(void *fdt, sPAPRMachineState *spapr) 33753018216SPaolo Bonzini { 33882677ed2SAlexey Kardashevskiy int ret = 0, offset, cpus_offset; 33982677ed2SAlexey Kardashevskiy CPUState *cs; 34053018216SPaolo Bonzini char cpu_model[32]; 34153018216SPaolo Bonzini int smt = kvmppc_smt_threads(); 34253018216SPaolo Bonzini uint32_t pft_size_prop[] = {0, cpu_to_be32(spapr->htab_shift)}; 34353018216SPaolo Bonzini 34482677ed2SAlexey Kardashevskiy CPU_FOREACH(cs) { 34582677ed2SAlexey Kardashevskiy PowerPCCPU *cpu = POWERPC_CPU(cs); 34682677ed2SAlexey Kardashevskiy DeviceClass *dc = DEVICE_GET_CLASS(cs); 3472e886fb3SSam Bobroff int index = spapr_vcpu_id(cpu); 34812dbeb16SDavid Gibson int compat_smt = MIN(smp_threads, ppc_compat_max_threads(cpu)); 34953018216SPaolo Bonzini 3500f20ba62SAlexey Kardashevskiy if ((index % smt) != 0) { 35153018216SPaolo Bonzini continue; 35253018216SPaolo Bonzini } 35353018216SPaolo Bonzini 35482677ed2SAlexey Kardashevskiy snprintf(cpu_model, 32, "%s@%x", dc->fw_name, index); 35553018216SPaolo Bonzini 35682677ed2SAlexey Kardashevskiy cpus_offset = fdt_path_offset(fdt, "/cpus"); 35782677ed2SAlexey Kardashevskiy if (cpus_offset < 0) { 358a4f3885cSGreg Kurz cpus_offset = fdt_add_subnode(fdt, 0, "cpus"); 35982677ed2SAlexey Kardashevskiy if (cpus_offset < 0) { 36082677ed2SAlexey Kardashevskiy return cpus_offset; 36182677ed2SAlexey Kardashevskiy } 36282677ed2SAlexey Kardashevskiy } 36382677ed2SAlexey Kardashevskiy offset = fdt_subnode_offset(fdt, cpus_offset, cpu_model); 36482677ed2SAlexey Kardashevskiy if (offset < 0) { 36582677ed2SAlexey Kardashevskiy offset = fdt_add_subnode(fdt, cpus_offset, cpu_model); 36653018216SPaolo Bonzini if (offset 
< 0) { 36753018216SPaolo Bonzini return offset; 36853018216SPaolo Bonzini } 36982677ed2SAlexey Kardashevskiy } 37053018216SPaolo Bonzini 3710da6f3feSBharata B Rao ret = fdt_setprop(fdt, offset, "ibm,pft-size", 3720da6f3feSBharata B Rao pft_size_prop, sizeof(pft_size_prop)); 37353018216SPaolo Bonzini if (ret < 0) { 37453018216SPaolo Bonzini return ret; 37553018216SPaolo Bonzini } 37653018216SPaolo Bonzini 37799861ecbSIgor Mammedov if (nb_numa_nodes > 1) { 37899861ecbSIgor Mammedov ret = spapr_fixup_cpu_numa_dt(fdt, offset, cpu); 37953018216SPaolo Bonzini if (ret < 0) { 38053018216SPaolo Bonzini return ret; 38153018216SPaolo Bonzini } 38299861ecbSIgor Mammedov } 383833d4668SAlexey Kardashevskiy 38412dbeb16SDavid Gibson ret = spapr_fixup_cpu_smt_dt(fdt, offset, cpu, compat_smt); 385833d4668SAlexey Kardashevskiy if (ret < 0) { 386833d4668SAlexey Kardashevskiy return ret; 387833d4668SAlexey Kardashevskiy } 388e957f6a9SSam Bobroff 389*ee76a09fSDavid Gibson spapr_populate_pa_features(spapr, cpu, fdt, offset, 390e957f6a9SSam Bobroff spapr->cas_legacy_guest_workaround); 39153018216SPaolo Bonzini } 39253018216SPaolo Bonzini return ret; 39353018216SPaolo Bonzini } 39453018216SPaolo Bonzini 395c86c1affSDaniel Henrique Barboza static hwaddr spapr_node0_size(MachineState *machine) 396b082d65aSAlexey Kardashevskiy { 397b082d65aSAlexey Kardashevskiy if (nb_numa_nodes) { 398b082d65aSAlexey Kardashevskiy int i; 399b082d65aSAlexey Kardashevskiy for (i = 0; i < nb_numa_nodes; ++i) { 400b082d65aSAlexey Kardashevskiy if (numa_info[i].node_mem) { 401fb164994SDavid Gibson return MIN(pow2floor(numa_info[i].node_mem), 402fb164994SDavid Gibson machine->ram_size); 403b082d65aSAlexey Kardashevskiy } 404b082d65aSAlexey Kardashevskiy } 405b082d65aSAlexey Kardashevskiy } 406fb164994SDavid Gibson return machine->ram_size; 407b082d65aSAlexey Kardashevskiy } 408b082d65aSAlexey Kardashevskiy 409a1d59c0fSAlexey Kardashevskiy static void add_str(GString *s, const gchar *s1) 410a1d59c0fSAlexey 
Kardashevskiy { 411a1d59c0fSAlexey Kardashevskiy g_string_append_len(s, s1, strlen(s1) + 1); 412a1d59c0fSAlexey Kardashevskiy } 41353018216SPaolo Bonzini 41403d196b7SBharata B Rao static int spapr_populate_memory_node(void *fdt, int nodeid, hwaddr start, 41526a8c353SAlexey Kardashevskiy hwaddr size) 41626a8c353SAlexey Kardashevskiy { 41726a8c353SAlexey Kardashevskiy uint32_t associativity[] = { 41826a8c353SAlexey Kardashevskiy cpu_to_be32(0x4), /* length */ 41926a8c353SAlexey Kardashevskiy cpu_to_be32(0x0), cpu_to_be32(0x0), 420c3b4f589SAlexey Kardashevskiy cpu_to_be32(0x0), cpu_to_be32(nodeid) 42126a8c353SAlexey Kardashevskiy }; 42226a8c353SAlexey Kardashevskiy char mem_name[32]; 42326a8c353SAlexey Kardashevskiy uint64_t mem_reg_property[2]; 42426a8c353SAlexey Kardashevskiy int off; 42526a8c353SAlexey Kardashevskiy 42626a8c353SAlexey Kardashevskiy mem_reg_property[0] = cpu_to_be64(start); 42726a8c353SAlexey Kardashevskiy mem_reg_property[1] = cpu_to_be64(size); 42826a8c353SAlexey Kardashevskiy 42926a8c353SAlexey Kardashevskiy sprintf(mem_name, "memory@" TARGET_FMT_lx, start); 43026a8c353SAlexey Kardashevskiy off = fdt_add_subnode(fdt, 0, mem_name); 43126a8c353SAlexey Kardashevskiy _FDT(off); 43226a8c353SAlexey Kardashevskiy _FDT((fdt_setprop_string(fdt, off, "device_type", "memory"))); 43326a8c353SAlexey Kardashevskiy _FDT((fdt_setprop(fdt, off, "reg", mem_reg_property, 43426a8c353SAlexey Kardashevskiy sizeof(mem_reg_property)))); 43526a8c353SAlexey Kardashevskiy _FDT((fdt_setprop(fdt, off, "ibm,associativity", associativity, 43626a8c353SAlexey Kardashevskiy sizeof(associativity)))); 43703d196b7SBharata B Rao return off; 43826a8c353SAlexey Kardashevskiy } 43926a8c353SAlexey Kardashevskiy 44028e02042SDavid Gibson static int spapr_populate_memory(sPAPRMachineState *spapr, void *fdt) 44153018216SPaolo Bonzini { 442fb164994SDavid Gibson MachineState *machine = MACHINE(spapr); 4437db8a127SAlexey Kardashevskiy hwaddr mem_start, node_size; 4447db8a127SAlexey 
Kardashevskiy int i, nb_nodes = nb_numa_nodes; 4457db8a127SAlexey Kardashevskiy NodeInfo *nodes = numa_info; 4467db8a127SAlexey Kardashevskiy NodeInfo ramnode; 44753018216SPaolo Bonzini 4487db8a127SAlexey Kardashevskiy /* No NUMA nodes, assume there is just one node with whole RAM */ 4497db8a127SAlexey Kardashevskiy if (!nb_numa_nodes) { 4507db8a127SAlexey Kardashevskiy nb_nodes = 1; 451fb164994SDavid Gibson ramnode.node_mem = machine->ram_size; 4527db8a127SAlexey Kardashevskiy nodes = &ramnode; 4535fe269b1SPaul Mackerras } 45453018216SPaolo Bonzini 4557db8a127SAlexey Kardashevskiy for (i = 0, mem_start = 0; i < nb_nodes; ++i) { 4567db8a127SAlexey Kardashevskiy if (!nodes[i].node_mem) { 4577db8a127SAlexey Kardashevskiy continue; 45853018216SPaolo Bonzini } 459fb164994SDavid Gibson if (mem_start >= machine->ram_size) { 4605fe269b1SPaul Mackerras node_size = 0; 4615fe269b1SPaul Mackerras } else { 4627db8a127SAlexey Kardashevskiy node_size = nodes[i].node_mem; 463fb164994SDavid Gibson if (node_size > machine->ram_size - mem_start) { 464fb164994SDavid Gibson node_size = machine->ram_size - mem_start; 4655fe269b1SPaul Mackerras } 4665fe269b1SPaul Mackerras } 4677db8a127SAlexey Kardashevskiy if (!mem_start) { 4687db8a127SAlexey Kardashevskiy /* ppc_spapr_init() checks for rma_size <= node0_size already */ 469e8f986fcSBharata B Rao spapr_populate_memory_node(fdt, i, 0, spapr->rma_size); 4707db8a127SAlexey Kardashevskiy mem_start += spapr->rma_size; 4717db8a127SAlexey Kardashevskiy node_size -= spapr->rma_size; 4727db8a127SAlexey Kardashevskiy } 4736010818cSAlexey Kardashevskiy for ( ; node_size; ) { 4746010818cSAlexey Kardashevskiy hwaddr sizetmp = pow2floor(node_size); 4756010818cSAlexey Kardashevskiy 4766010818cSAlexey Kardashevskiy /* mem_start != 0 here */ 4776010818cSAlexey Kardashevskiy if (ctzl(mem_start) < ctzl(sizetmp)) { 4786010818cSAlexey Kardashevskiy sizetmp = 1ULL << ctzl(mem_start); 4796010818cSAlexey Kardashevskiy } 4806010818cSAlexey Kardashevskiy 
4816010818cSAlexey Kardashevskiy spapr_populate_memory_node(fdt, i, mem_start, sizetmp); 4826010818cSAlexey Kardashevskiy node_size -= sizetmp; 4836010818cSAlexey Kardashevskiy mem_start += sizetmp; 4846010818cSAlexey Kardashevskiy } 48553018216SPaolo Bonzini } 48653018216SPaolo Bonzini 48753018216SPaolo Bonzini return 0; 48853018216SPaolo Bonzini } 48953018216SPaolo Bonzini 4900da6f3feSBharata B Rao static void spapr_populate_cpu_dt(CPUState *cs, void *fdt, int offset, 4910da6f3feSBharata B Rao sPAPRMachineState *spapr) 4920da6f3feSBharata B Rao { 4930da6f3feSBharata B Rao PowerPCCPU *cpu = POWERPC_CPU(cs); 4940da6f3feSBharata B Rao CPUPPCState *env = &cpu->env; 4950da6f3feSBharata B Rao PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs); 4962e886fb3SSam Bobroff int index = spapr_vcpu_id(cpu); 4970da6f3feSBharata B Rao uint32_t segs[] = {cpu_to_be32(28), cpu_to_be32(40), 4980da6f3feSBharata B Rao 0xffffffff, 0xffffffff}; 499afd10a0fSBharata B Rao uint32_t tbfreq = kvm_enabled() ? kvmppc_get_tbfreq() 500afd10a0fSBharata B Rao : SPAPR_TIMEBASE_FREQ; 5010da6f3feSBharata B Rao uint32_t cpufreq = kvm_enabled() ? 
kvmppc_get_clockfreq() : 1000000000; 5020da6f3feSBharata B Rao uint32_t page_sizes_prop[64]; 5030da6f3feSBharata B Rao size_t page_sizes_prop_size; 50422419c2aSDavid Gibson uint32_t vcpus_per_socket = smp_threads * smp_cores; 5050da6f3feSBharata B Rao uint32_t pft_size_prop[] = {0, cpu_to_be32(spapr->htab_shift)}; 50612dbeb16SDavid Gibson int compat_smt = MIN(smp_threads, ppc_compat_max_threads(cpu)); 507af81cf32SBharata B Rao sPAPRDRConnector *drc; 508af81cf32SBharata B Rao int drc_index; 509c64abd1fSSam Bobroff uint32_t radix_AP_encodings[PPC_PAGE_SIZES_MAX_SZ]; 510c64abd1fSSam Bobroff int i; 511af81cf32SBharata B Rao 512fbf55397SDavid Gibson drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU, index); 513af81cf32SBharata B Rao if (drc) { 5140b55aa91SDavid Gibson drc_index = spapr_drc_index(drc); 515af81cf32SBharata B Rao _FDT((fdt_setprop_cell(fdt, offset, "ibm,my-drc-index", drc_index))); 516af81cf32SBharata B Rao } 5170da6f3feSBharata B Rao 5180da6f3feSBharata B Rao _FDT((fdt_setprop_cell(fdt, offset, "reg", index))); 5190da6f3feSBharata B Rao _FDT((fdt_setprop_string(fdt, offset, "device_type", "cpu"))); 5200da6f3feSBharata B Rao 5210da6f3feSBharata B Rao _FDT((fdt_setprop_cell(fdt, offset, "cpu-version", env->spr[SPR_PVR]))); 5220da6f3feSBharata B Rao _FDT((fdt_setprop_cell(fdt, offset, "d-cache-block-size", 5230da6f3feSBharata B Rao env->dcache_line_size))); 5240da6f3feSBharata B Rao _FDT((fdt_setprop_cell(fdt, offset, "d-cache-line-size", 5250da6f3feSBharata B Rao env->dcache_line_size))); 5260da6f3feSBharata B Rao _FDT((fdt_setprop_cell(fdt, offset, "i-cache-block-size", 5270da6f3feSBharata B Rao env->icache_line_size))); 5280da6f3feSBharata B Rao _FDT((fdt_setprop_cell(fdt, offset, "i-cache-line-size", 5290da6f3feSBharata B Rao env->icache_line_size))); 5300da6f3feSBharata B Rao 5310da6f3feSBharata B Rao if (pcc->l1_dcache_size) { 5320da6f3feSBharata B Rao _FDT((fdt_setprop_cell(fdt, offset, "d-cache-size", 5330da6f3feSBharata B Rao pcc->l1_dcache_size))); 
5340da6f3feSBharata B Rao } else { 5353dc6f869SAlistair Francis warn_report("Unknown L1 dcache size for cpu"); 5360da6f3feSBharata B Rao } 5370da6f3feSBharata B Rao if (pcc->l1_icache_size) { 5380da6f3feSBharata B Rao _FDT((fdt_setprop_cell(fdt, offset, "i-cache-size", 5390da6f3feSBharata B Rao pcc->l1_icache_size))); 5400da6f3feSBharata B Rao } else { 5413dc6f869SAlistair Francis warn_report("Unknown L1 icache size for cpu"); 5420da6f3feSBharata B Rao } 5430da6f3feSBharata B Rao 5440da6f3feSBharata B Rao _FDT((fdt_setprop_cell(fdt, offset, "timebase-frequency", tbfreq))); 5450da6f3feSBharata B Rao _FDT((fdt_setprop_cell(fdt, offset, "clock-frequency", cpufreq))); 546fd5da5c4SThomas Huth _FDT((fdt_setprop_cell(fdt, offset, "slb-size", env->slb_nr))); 5470da6f3feSBharata B Rao _FDT((fdt_setprop_cell(fdt, offset, "ibm,slb-size", env->slb_nr))); 5480da6f3feSBharata B Rao _FDT((fdt_setprop_string(fdt, offset, "status", "okay"))); 5490da6f3feSBharata B Rao _FDT((fdt_setprop(fdt, offset, "64-bit", NULL, 0))); 5500da6f3feSBharata B Rao 5510da6f3feSBharata B Rao if (env->spr_cb[SPR_PURR].oea_read) { 5520da6f3feSBharata B Rao _FDT((fdt_setprop(fdt, offset, "ibm,purr", NULL, 0))); 5530da6f3feSBharata B Rao } 5540da6f3feSBharata B Rao 5550da6f3feSBharata B Rao if (env->mmu_model & POWERPC_MMU_1TSEG) { 5560da6f3feSBharata B Rao _FDT((fdt_setprop(fdt, offset, "ibm,processor-segment-sizes", 5570da6f3feSBharata B Rao segs, sizeof(segs)))); 5580da6f3feSBharata B Rao } 5590da6f3feSBharata B Rao 5600da6f3feSBharata B Rao /* Advertise VMX/VSX (vector extensions) if available 5610da6f3feSBharata B Rao * 0 / no property == no vector extensions 5620da6f3feSBharata B Rao * 1 == VMX / Altivec available 5630da6f3feSBharata B Rao * 2 == VSX available */ 5640da6f3feSBharata B Rao if (env->insns_flags & PPC_ALTIVEC) { 5650da6f3feSBharata B Rao uint32_t vmx = (env->insns_flags2 & PPC2_VSX) ? 
2 : 1; 5660da6f3feSBharata B Rao 5670da6f3feSBharata B Rao _FDT((fdt_setprop_cell(fdt, offset, "ibm,vmx", vmx))); 5680da6f3feSBharata B Rao } 5690da6f3feSBharata B Rao 5700da6f3feSBharata B Rao /* Advertise DFP (Decimal Floating Point) if available 5710da6f3feSBharata B Rao * 0 / no property == no DFP 5720da6f3feSBharata B Rao * 1 == DFP available */ 5730da6f3feSBharata B Rao if (env->insns_flags2 & PPC2_DFP) { 5740da6f3feSBharata B Rao _FDT((fdt_setprop_cell(fdt, offset, "ibm,dfp", 1))); 5750da6f3feSBharata B Rao } 5760da6f3feSBharata B Rao 5773654fa95SCédric Le Goater page_sizes_prop_size = ppc_create_page_sizes_prop(env, page_sizes_prop, 5780da6f3feSBharata B Rao sizeof(page_sizes_prop)); 5790da6f3feSBharata B Rao if (page_sizes_prop_size) { 5800da6f3feSBharata B Rao _FDT((fdt_setprop(fdt, offset, "ibm,segment-page-sizes", 5810da6f3feSBharata B Rao page_sizes_prop, page_sizes_prop_size))); 5820da6f3feSBharata B Rao } 5830da6f3feSBharata B Rao 584*ee76a09fSDavid Gibson spapr_populate_pa_features(spapr, cpu, fdt, offset, false); 58590da0d5aSBenjamin Herrenschmidt 5860da6f3feSBharata B Rao _FDT((fdt_setprop_cell(fdt, offset, "ibm,chip-id", 58722419c2aSDavid Gibson cs->cpu_index / vcpus_per_socket))); 5880da6f3feSBharata B Rao 5890da6f3feSBharata B Rao _FDT((fdt_setprop(fdt, offset, "ibm,pft-size", 5900da6f3feSBharata B Rao pft_size_prop, sizeof(pft_size_prop)))); 5910da6f3feSBharata B Rao 59299861ecbSIgor Mammedov if (nb_numa_nodes > 1) { 59399861ecbSIgor Mammedov _FDT(spapr_fixup_cpu_numa_dt(fdt, offset, cpu)); 59499861ecbSIgor Mammedov } 5950da6f3feSBharata B Rao 59612dbeb16SDavid Gibson _FDT(spapr_fixup_cpu_smt_dt(fdt, offset, cpu, compat_smt)); 597c64abd1fSSam Bobroff 598c64abd1fSSam Bobroff if (pcc->radix_page_info) { 599c64abd1fSSam Bobroff for (i = 0; i < pcc->radix_page_info->count; i++) { 600c64abd1fSSam Bobroff radix_AP_encodings[i] = 601c64abd1fSSam Bobroff cpu_to_be32(pcc->radix_page_info->entries[i]); 602c64abd1fSSam Bobroff } 603c64abd1fSSam Bobroff 
_FDT((fdt_setprop(fdt, offset, "ibm,processor-radix-AP-encodings", 604c64abd1fSSam Bobroff radix_AP_encodings, 605c64abd1fSSam Bobroff pcc->radix_page_info->count * 606c64abd1fSSam Bobroff sizeof(radix_AP_encodings[0])))); 607c64abd1fSSam Bobroff } 6080da6f3feSBharata B Rao } 6090da6f3feSBharata B Rao 6100da6f3feSBharata B Rao static void spapr_populate_cpus_dt_node(void *fdt, sPAPRMachineState *spapr) 6110da6f3feSBharata B Rao { 6120da6f3feSBharata B Rao CPUState *cs; 6130da6f3feSBharata B Rao int cpus_offset; 6140da6f3feSBharata B Rao char *nodename; 6150da6f3feSBharata B Rao int smt = kvmppc_smt_threads(); 6160da6f3feSBharata B Rao 6170da6f3feSBharata B Rao cpus_offset = fdt_add_subnode(fdt, 0, "cpus"); 6180da6f3feSBharata B Rao _FDT(cpus_offset); 6190da6f3feSBharata B Rao _FDT((fdt_setprop_cell(fdt, cpus_offset, "#address-cells", 0x1))); 6200da6f3feSBharata B Rao _FDT((fdt_setprop_cell(fdt, cpus_offset, "#size-cells", 0x0))); 6210da6f3feSBharata B Rao 6220da6f3feSBharata B Rao /* 6230da6f3feSBharata B Rao * We walk the CPUs in reverse order to ensure that CPU DT nodes 6240da6f3feSBharata B Rao * created by fdt_add_subnode() end up in the right order in FDT 6250da6f3feSBharata B Rao * for the guest kernel the enumerate the CPUs correctly. 
6260da6f3feSBharata B Rao */ 6270da6f3feSBharata B Rao CPU_FOREACH_REVERSE(cs) { 6280da6f3feSBharata B Rao PowerPCCPU *cpu = POWERPC_CPU(cs); 6292e886fb3SSam Bobroff int index = spapr_vcpu_id(cpu); 6300da6f3feSBharata B Rao DeviceClass *dc = DEVICE_GET_CLASS(cs); 6310da6f3feSBharata B Rao int offset; 6320da6f3feSBharata B Rao 6330da6f3feSBharata B Rao if ((index % smt) != 0) { 6340da6f3feSBharata B Rao continue; 6350da6f3feSBharata B Rao } 6360da6f3feSBharata B Rao 6370da6f3feSBharata B Rao nodename = g_strdup_printf("%s@%x", dc->fw_name, index); 6380da6f3feSBharata B Rao offset = fdt_add_subnode(fdt, cpus_offset, nodename); 6390da6f3feSBharata B Rao g_free(nodename); 6400da6f3feSBharata B Rao _FDT(offset); 6410da6f3feSBharata B Rao spapr_populate_cpu_dt(cs, fdt, offset, spapr); 6420da6f3feSBharata B Rao } 6430da6f3feSBharata B Rao 6440da6f3feSBharata B Rao } 6450da6f3feSBharata B Rao 646f47bd1c8SIgor Mammedov static uint32_t spapr_pc_dimm_node(MemoryDeviceInfoList *list, ram_addr_t addr) 647f47bd1c8SIgor Mammedov { 648f47bd1c8SIgor Mammedov MemoryDeviceInfoList *info; 649f47bd1c8SIgor Mammedov 650f47bd1c8SIgor Mammedov for (info = list; info; info = info->next) { 651f47bd1c8SIgor Mammedov MemoryDeviceInfo *value = info->value; 652f47bd1c8SIgor Mammedov 653f47bd1c8SIgor Mammedov if (value && value->type == MEMORY_DEVICE_INFO_KIND_DIMM) { 654f47bd1c8SIgor Mammedov PCDIMMDeviceInfo *pcdimm_info = value->u.dimm.data; 655f47bd1c8SIgor Mammedov 656f47bd1c8SIgor Mammedov if (pcdimm_info->addr >= addr && 657f47bd1c8SIgor Mammedov addr < (pcdimm_info->addr + pcdimm_info->size)) { 658f47bd1c8SIgor Mammedov return pcdimm_info->node; 659f47bd1c8SIgor Mammedov } 660f47bd1c8SIgor Mammedov } 661f47bd1c8SIgor Mammedov } 662f47bd1c8SIgor Mammedov 663f47bd1c8SIgor Mammedov return -1; 664f47bd1c8SIgor Mammedov } 665f47bd1c8SIgor Mammedov 66603d196b7SBharata B Rao /* 66703d196b7SBharata B Rao * Adds ibm,dynamic-reconfiguration-memory node. 
 * Refer to docs/specs/ppc-spapr-hotplug.txt for the documentation
 * of this device tree node.
 */
static int spapr_populate_drconf_memory(sPAPRMachineState *spapr, void *fdt)
{
    MachineState *machine = MACHINE(spapr);
    int ret, i, offset;
    uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE;
    uint32_t prop_lmb_size[] = {0, cpu_to_be32(lmb_size)};
    /* First LMB index that belongs to the hotpluggable region */
    uint32_t hotplug_lmb_start = spapr->hotplug_memory.base / lmb_size;
    /* Total LMB count: boot RAM + gap + hotplug region, in lmb_size units */
    uint32_t nr_lmbs = (spapr->hotplug_memory.base +
                       memory_region_size(&spapr->hotplug_memory.mr)) /
                       lmb_size;
    uint32_t *int_buf, *cur_index, buf_len;
    int nr_nodes = nb_numa_nodes ? nb_numa_nodes : 1;
    MemoryDeviceInfoList *dimms = NULL;

    /*
     * Don't create the node if there is no hotpluggable memory
     */
    if (machine->ram_size == machine->maxram_size) {
        return 0;
    }

    /*
     * Allocate enough buffer size to fit in ibm,dynamic-memory
     * or ibm,associativity-lookup-arrays
     */
    buf_len = MAX(nr_lmbs * SPAPR_DR_LMB_LIST_ENTRY_SIZE + 1, nr_nodes * 4 + 2)
              * sizeof(uint32_t);
    cur_index = int_buf = g_malloc0(buf_len);

    offset = fdt_add_subnode(fdt, 0, "ibm,dynamic-reconfiguration-memory");

    ret = fdt_setprop(fdt, offset, "ibm,lmb-size", prop_lmb_size,
                    sizeof(prop_lmb_size));
    if (ret < 0) {
        goto out;
    }

    ret = fdt_setprop_cell(fdt, offset, "ibm,memory-flags-mask", 0xff);
    if (ret < 0) {
        goto out;
    }

    ret = fdt_setprop_cell(fdt, offset, "ibm,memory-preservation-time", 0x0);
    if (ret < 0) {
        goto out;
    }

    /* Only query the DIMM list if a hotpluggable region actually exists */
    if (hotplug_lmb_start) {
        MemoryDeviceInfoList **prev = &dimms;
        qmp_pc_dimm_device_list(qdev_get_machine(), &prev);
    }

    /* ibm,dynamic-memory: a count followed by one 6-cell entry per LMB */
    int_buf[0] = cpu_to_be32(nr_lmbs);
    cur_index++;
    for (i = 0; i < nr_lmbs; i++) {
        uint64_t addr = i * lmb_size;
        uint32_t *dynamic_memory = cur_index;

        if (i >= hotplug_lmb_start) {
            sPAPRDRConnector *drc;

            drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, i);
            g_assert(drc);

            /* Entry layout: addr-hi, addr-lo, DRC index, reserved,
             * associativity (NUMA node), flags */
            dynamic_memory[0] = cpu_to_be32(addr >> 32);
            dynamic_memory[1] = cpu_to_be32(addr & 0xffffffff);
            dynamic_memory[2] = cpu_to_be32(spapr_drc_index(drc));
            dynamic_memory[3] = cpu_to_be32(0); /* reserved */
            dynamic_memory[4] = cpu_to_be32(spapr_pc_dimm_node(dimms, addr));
            if (memory_region_present(get_system_memory(), addr)) {
                dynamic_memory[5] = cpu_to_be32(SPAPR_LMB_FLAGS_ASSIGNED);
            } else {
                dynamic_memory[5] = cpu_to_be32(0);
            }
        } else {
            /*
             * LMB information for RMA, boot time RAM and gap b/n RAM and
             * hotplug memory region -- all these are marked as reserved
             * and as having no valid DRC.
             */
            dynamic_memory[0] = cpu_to_be32(addr >> 32);
            dynamic_memory[1] = cpu_to_be32(addr & 0xffffffff);
            dynamic_memory[2] = cpu_to_be32(0);
            dynamic_memory[3] = cpu_to_be32(0); /* reserved */
            dynamic_memory[4] = cpu_to_be32(-1);
            dynamic_memory[5] = cpu_to_be32(SPAPR_LMB_FLAGS_RESERVED |
                                            SPAPR_LMB_FLAGS_DRC_INVALID);
        }

        cur_index += SPAPR_DR_LMB_LIST_ENTRY_SIZE;
    }
    qapi_free_MemoryDeviceInfoList(dimms);
    ret = fdt_setprop(fdt, offset, "ibm,dynamic-memory", int_buf, buf_len);
    if (ret < 0) {
        goto out;
    }

    /* ibm,associativity-lookup-arrays: int_buf is reused for this property */
    cur_index = int_buf;
    int_buf[0] = cpu_to_be32(nr_nodes);
    int_buf[1] = cpu_to_be32(4); /* Number of entries per associativity list */
    cur_index += 2;
    for (i = 0; i < nr_nodes; i++) {
        uint32_t associativity[] = {
            cpu_to_be32(0x0),
            cpu_to_be32(0x0),
            cpu_to_be32(0x0),
            cpu_to_be32(i)
        };
        memcpy(cur_index, associativity, sizeof(associativity));
        cur_index += 4;
    }
    ret = fdt_setprop(fdt, offset, "ibm,associativity-lookup-arrays", int_buf,
                      (cur_index - int_buf) * sizeof(uint32_t));
out:
    g_free(int_buf);
    return ret;
}

/*
 * Apply the device-tree updates negotiated at CAS time: the
 * dynamic-reconfiguration-memory node (if the guest selected it via
 * option vector 5) and the /chosen ibm,architecture-vec-5 property.
 * Returns 0 on success, negative libfdt error otherwise.
 */
static int spapr_dt_cas_updates(sPAPRMachineState *spapr, void *fdt,
                                sPAPROptionVector *ov5_updates)
{
    sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
    int ret = 0, offset;

    /* Generate ibm,dynamic-reconfiguration-memory node if required */
    if (spapr_ovec_test(ov5_updates, OV5_DRCONF_MEMORY)) {
        g_assert(smc->dr_lmb_enabled);
        ret = spapr_populate_drconf_memory(spapr, fdt);
        if (ret) {
            goto out;
        }
    }

    /* /chosen may not exist yet in the CAS response skeleton tree */
    offset = fdt_path_offset(fdt, "/chosen");
    if (offset < 0) {
        offset = fdt_add_subnode(fdt, 0, "chosen");
        if (offset < 0) {
            return offset;
        }
    }
    ret = spapr_ovec_populate_dt(fdt, offset, spapr->ov5_cas,
                                 "ibm,architecture-vec-5");

out:
    return ret;
}

/*
 * Returns true if any DR connector holds a device that was plugged
 * before the guest called ibm,client-architecture-support.  Such a
 * configuration cannot be described by the incremental CAS response,
 * so the caller forces a CAS reboot instead.
 */
static bool spapr_hotplugged_dev_before_cas(void)
{
    Object *drc_container, *obj;
    ObjectProperty *prop;
    ObjectPropertyIterator iter;

    drc_container = container_get(object_get_root(), "/dr-connector");
    object_property_iter_init(&iter, drc_container);
    while ((prop = object_property_iter_next(&iter))) {
        /* Only link<> properties point at DRC objects */
        if (!strstart(prop->type, "link<", NULL)) {
            continue;
        }
        obj = object_property_get_link(drc_container, prop->name, NULL);
        if (spapr_drc_needed(obj)) {
            return true;
        }
    }
    return false;
}

/*
 * Build the device-tree update blob returned to the guest from
 * ibm,client-architecture-support and write it (header + packed FDT)
 * to guest memory at @addr.  @size is the guest-provided buffer size.
 *
 * Returns 0 on success, 1 to request a CAS reboot (hotplug happened
 * before CAS), -1 if the update could not be built or did not fit.
 */
int spapr_h_cas_compose_response(sPAPRMachineState *spapr,
                                 target_ulong addr, target_ulong size,
                                 sPAPROptionVector *ov5_updates)
{
    void *fdt, *fdt_skel;
    sPAPRDeviceTreeUpdateHeader hdr = { .version_id = 1 };

    if (spapr_hotplugged_dev_before_cas()) {
        return 1;
    }

    /* Sanity-check the guest-supplied buffer size before trusting it */
    if (size < sizeof(hdr) || size > FW_MAX_SIZE) {
        error_report("SLOF provided an unexpected CAS buffer size "
                     TARGET_FMT_lu " (min: %zu, max: %u)",
                     size, sizeof(hdr), FW_MAX_SIZE);
        exit(EXIT_FAILURE);
    }

    size -= sizeof(hdr);

    /* Create skeleton */
    fdt_skel = g_malloc0(size);
    _FDT((fdt_create(fdt_skel, size)));
    _FDT((fdt_begin_node(fdt_skel, "")));
    _FDT((fdt_end_node(fdt_skel)));
    _FDT((fdt_finish(fdt_skel)));
    fdt = g_malloc0(size);
    _FDT((fdt_open_into(fdt_skel, fdt, size)));
    g_free(fdt_skel);

    /* Fixup cpu nodes */
    _FDT((spapr_fixup_cpu_dt(fdt, spapr)));

    if (spapr_dt_cas_updates(spapr, fdt, ov5_updates)) {
        /* NOTE(review): fdt appears to be leaked on this and the
         * overflow error path below -- confirm and consider g_free() */
        return -1;
    }

    /* Pack resulting tree */
    _FDT((fdt_pack(fdt)));

    if (fdt_totalsize(fdt) + sizeof(hdr) > size) {
        trace_spapr_cas_failed(size);
        return -1;
    }

    /* Header first, then the packed tree, straight into guest memory */
    cpu_physical_memory_write(addr, &hdr, sizeof(hdr));
    cpu_physical_memory_write(addr + sizeof(hdr), fdt, fdt_totalsize(fdt));
    trace_spapr_cas_continue(fdt_totalsize(fdt) + sizeof(hdr));
    g_free(fdt);

    return 0;
}

/* Build the /rtas node: hypertas capability lists and RTAS parameters. */
static void spapr_dt_rtas(sPAPRMachineState *spapr, void *fdt)
{
    int rtas;
    GString *hypertas = g_string_sized_new(256);
    GString *qemu_hypertas = g_string_sized_new(256);
    uint32_t refpoints[] = { cpu_to_be32(0x4), cpu_to_be32(0x4) };
    /* Highest guest-physical address: end of the hotplug memory region */
    uint64_t max_hotplug_addr = spapr->hotplug_memory.base +
        memory_region_size(&spapr->hotplug_memory.mr);
    uint32_t lrdr_capacity[] = {
        cpu_to_be32(max_hotplug_addr >> 32),
        cpu_to_be32(max_hotplug_addr & 0xffffffff),
        0, cpu_to_be32(SPAPR_MEMORY_BLOCK_SIZE),
        cpu_to_be32(max_cpus / smp_threads),
    };

    _FDT(rtas = fdt_add_subnode(fdt, 0, "rtas"));

    /* hypertas */
    add_str(hypertas, "hcall-pft");
    add_str(hypertas, "hcall-term");
    add_str(hypertas, "hcall-dabr");
    add_str(hypertas, "hcall-interrupt");
    add_str(hypertas, "hcall-tce");
    add_str(hypertas, "hcall-vio");
    add_str(hypertas, "hcall-splpar");
    add_str(hypertas, "hcall-bulk");
    add_str(hypertas, "hcall-set-mode");
    add_str(hypertas, "hcall-sprg0");
    add_str(hypertas, "hcall-copy");
    add_str(hypertas, "hcall-debug");
    add_str(qemu_hypertas, "hcall-memop1");

    if (!kvm_enabled() || kvmppc_spapr_use_multitce()) {
        add_str(hypertas, "hcall-multi-tce");
    }

    /* Advertise HPT resizing only when not explicitly disabled */
    if (spapr->resize_hpt != SPAPR_RESIZE_HPT_DISABLED) {
        add_str(hypertas, "hcall-hpt-resize");
    }

    _FDT(fdt_setprop(fdt, rtas, "ibm,hypertas-functions",
                     hypertas->str, hypertas->len));
    g_string_free(hypertas, TRUE);
    _FDT(fdt_setprop(fdt, rtas, "qemu,hypertas-functions",
                     qemu_hypertas->str, qemu_hypertas->len));
    g_string_free(qemu_hypertas, TRUE);

    _FDT(fdt_setprop(fdt, rtas, "ibm,associativity-reference-points",
                     refpoints, sizeof(refpoints)));

    _FDT(fdt_setprop_cell(fdt, rtas, "rtas-error-log-max",
                          RTAS_ERROR_LOG_MAX));
    _FDT(fdt_setprop_cell(fdt, rtas, "rtas-event-scan-rate",
                          RTAS_EVENT_SCAN_RATE));

    g_assert(msi_nonbroken);
    _FDT(fdt_setprop(fdt, rtas, "ibm,change-msix-capable", NULL, 0));

    /*
     * According to PAPR, rtas ibm,os-term does not guarantee a return
     * back to the guest cpu.
     *
     * While an additional ibm,extended-os-term property indicates
     * that rtas call return will always occur. Set this property.
     */
    _FDT(fdt_setprop(fdt, rtas, "ibm,extended-os-term", NULL, 0));

    _FDT(fdt_setprop(fdt, rtas, "ibm,lrdr-capacity",
                     lrdr_capacity, sizeof(lrdr_capacity)));

    spapr_dt_rtas_tokens(fdt, rtas);
}

/* Prepare ibm,arch-vec-5-platform-support, which indicates the MMU features
 * that the guest may request and thus the valid values for bytes 24..26 of
 * option vector 5: */
static void spapr_dt_ov5_platform_support(void *fdt, int chosen)
{
    PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu);

    /* Pairs of (option-vector-5 byte index, supported-value byte) */
    char val[2 * 4] = {
        23, 0x00, /* Xive mode, filled in below. */
        24, 0x00, /* Hash/Radix, filled in below. */
        25, 0x00, /* Hash options: Segment Tables == no, GTSE == no. */
        26, 0x40, /* Radix options: GTSE == yes. */
    };

    if (!ppc_check_compat(first_ppc_cpu, CPU_POWERPC_LOGICAL_3_00, 0,
                          first_ppc_cpu->compat_pvr)) {
        /* If we're in a pre POWER9 compat mode then the guest should do hash */
        val[3] = 0x00; /* Hash */
    } else if (kvm_enabled()) {
        if (kvmppc_has_cap_mmu_radix() && kvmppc_has_cap_mmu_hash_v3()) {
            val[3] = 0x80; /* OV5_MMU_BOTH */
        } else if (kvmppc_has_cap_mmu_radix()) {
            val[3] = 0x40; /* OV5_MMU_RADIX_300 */
        } else {
            val[3] = 0x00; /* Hash */
        }
    } else {
        /* V3 MMU supports both hash and radix in tcg (with dynamic switching) */
        val[3] = 0xC0;
    }
    _FDT(fdt_setprop(fdt, chosen, "ibm,arch-vec-5-platform-support",
                     val, sizeof(val)));
}

/* Build the /chosen node: boot parameters, kernel/initrd info, console. */
static void spapr_dt_chosen(sPAPRMachineState *spapr, void *fdt)
{
    MachineState *machine = MACHINE(spapr);
    int chosen;
    const char *boot_device = machine->boot_order;
    char *stdout_path = spapr_vio_stdout_path(spapr->vio_bus);
    size_t cb = 0;
    char *bootlist = get_boot_devices_list(&cb, true);

    _FDT(chosen = fdt_add_subnode(fdt, 0, "chosen"));

    _FDT(fdt_setprop_string(fdt, chosen, "bootargs", machine->kernel_cmdline));
    _FDT(fdt_setprop_cell(fdt, chosen, "linux,initrd-start",
                          spapr->initrd_base));
    _FDT(fdt_setprop_cell(fdt, chosen, "linux,initrd-end",
                          spapr->initrd_base + spapr->initrd_size));

    if (spapr->kernel_size) {
        /* Load address and size of the -kernel blob, big-endian cells */
        uint64_t kprop[2] = { cpu_to_be64(KERNEL_LOAD_ADDR),
                              cpu_to_be64(spapr->kernel_size) };

        _FDT(fdt_setprop(fdt, chosen, "qemu,boot-kernel",
                         &kprop, sizeof(kprop)));
        if (spapr->kernel_le) {
            _FDT(fdt_setprop(fdt, chosen, "qemu,boot-kernel-le", NULL, 0));
        }
    }
    if (boot_menu) {
        _FDT((fdt_setprop_cell(fdt, chosen, "qemu,boot-menu", boot_menu)));
    }
    _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-width", graphic_width));
    _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-height", graphic_height));
    _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-depth", graphic_depth));

    if (cb && bootlist) {
        int i;

        /* SLOF expects a space-separated list, not newline-separated */
        for (i = 0; i < cb; i++) {
            if (bootlist[i] == '\n') {
                bootlist[i] = ' ';
            }
        }
        _FDT(fdt_setprop_string(fdt, chosen, "qemu,boot-list", bootlist));
    }

    if (boot_device && strlen(boot_device)) {
        _FDT(fdt_setprop_string(fdt, chosen, "qemu,boot-device", boot_device));
    }

    if (!spapr->has_graphics && stdout_path) {
        _FDT(fdt_setprop_string(fdt, chosen, "linux,stdout-path", stdout_path));
    }

    spapr_dt_ov5_platform_support(fdt, chosen);

    g_free(stdout_path);
    g_free(bootlist);
}

static void spapr_dt_hypervisor(sPAPRMachineState *spapr, void *fdt)
{
    /* The /hypervisor node isn't in PAPR - this is a hack to allow PR
     * KVM to work under pHyp with some guest co-operation */
    int hypervisor;
    uint8_t hypercall[16];

    _FDT(hypervisor = fdt_add_subnode(fdt, 0, "hypervisor"));
    /* indicate KVM hypercall interface */
    _FDT(fdt_setprop_string(fdt, hypervisor, "compatible", "linux,kvm"));
    if (kvmppc_has_cap_fixup_hcalls()) {
        /*
         * Older KVM versions with older guest kernels were broken
         * with the magic page, don't allow the guest to map it.
         */
        if (!kvmppc_get_hypercall(first_cpu->env_ptr, hypercall,
                                  sizeof(hypercall))) {
            _FDT(fdt_setprop(fdt, hypervisor, "hcall-instructions",
                             hypercall, sizeof(hypercall)));
        }
    }
}

/*
 * Build the complete boot-time flattened device tree for the guest.
 * Exits QEMU on any unrecoverable construction failure.  The returned
 * buffer (FDT_MAX_SIZE) is owned by the caller.
 * NOTE(review): rtas_addr/rtas_size are not referenced in this body --
 * presumably consumed elsewhere or vestigial; confirm before removing.
 */
static void *spapr_build_fdt(sPAPRMachineState *spapr,
                             hwaddr rtas_addr,
                             hwaddr rtas_size)
{
    MachineState *machine = MACHINE(spapr);
    MachineClass *mc = MACHINE_GET_CLASS(machine);
    sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);
    int ret;
    void *fdt;
    sPAPRPHBState *phb;
    char *buf;

    fdt = g_malloc0(FDT_MAX_SIZE);
    _FDT((fdt_create_empty_tree(fdt, FDT_MAX_SIZE)));

    /* Root node */
    _FDT(fdt_setprop_string(fdt, 0, "device_type", "chrp"));
    _FDT(fdt_setprop_string(fdt, 0, "model", "IBM pSeries (emulated by qemu)"));
    _FDT(fdt_setprop_string(fdt, 0, "compatible", "qemu,pseries"));

    /*
     * Add info to guest to indentify which host is it being run on
     * and what is the uuid of the guest
     */
    if (kvmppc_get_host_model(&buf)) {
        _FDT(fdt_setprop_string(fdt, 0, "host-model", buf));
        g_free(buf);
    }
    if (kvmppc_get_host_serial(&buf)) {
        _FDT(fdt_setprop_string(fdt, 0, "host-serial", buf));
        g_free(buf);
    }

    buf = qemu_uuid_unparse_strdup(&qemu_uuid);

    _FDT(fdt_setprop_string(fdt, 0, "vm,uuid", buf));
    if (qemu_uuid_set) {
        _FDT(fdt_setprop_string(fdt, 0, "system-id", buf));
    }
    g_free(buf);

    if (qemu_get_vm_name()) {
        _FDT(fdt_setprop_string(fdt, 0, "ibm,partition-name",
                                qemu_get_vm_name()));
    }

    _FDT(fdt_setprop_cell(fdt, 0, "#address-cells", 2));
    _FDT(fdt_setprop_cell(fdt, 0, "#size-cells", 2));

    /* /interrupt controller */
    spapr_dt_xics(xics_max_server_number(), fdt, PHANDLE_XICP);

    ret = spapr_populate_memory(spapr, fdt);
    if (ret < 0) {
        error_report("couldn't setup memory nodes in fdt");
        exit(1);
    }

    /* /vdevice */
    spapr_dt_vdevice(spapr->vio_bus, fdt);

    if (object_resolve_path_type("", TYPE_SPAPR_RNG, NULL)) {
        ret = spapr_rng_populate_dt(fdt);
        if (ret < 0) {
            error_report("could not set up rng device in the fdt");
            exit(1);
        }
    }

    QLIST_FOREACH(phb, &spapr->phbs, list) {
        ret = spapr_populate_pci_dt(phb, PHANDLE_XICP, fdt);
        if (ret < 0) {
            error_report("couldn't setup PCI devices in fdt");
            exit(1);
        }
    }

    /* cpus */
    spapr_populate_cpus_dt_node(fdt, spapr);

    if (smc->dr_lmb_enabled) {
        _FDT(spapr_drc_populate_dt(fdt, 0, NULL, SPAPR_DR_CONNECTOR_TYPE_LMB));
    }

    if (mc->has_hotpluggable_cpus) {
        int offset = fdt_path_offset(fdt, "/cpus");
        ret = spapr_drc_populate_dt(fdt, offset, NULL,
                                    SPAPR_DR_CONNECTOR_TYPE_CPU);
        if (ret < 0) {
            error_report("Couldn't set up CPU DR device tree properties");
            exit(1);
        }
    }

    /* /event-sources */
    spapr_dt_events(spapr, fdt);

    /* /rtas */
    spapr_dt_rtas(spapr, fdt);

    /* /chosen */
    spapr_dt_chosen(spapr, fdt);

    /* /hypervisor */
    if (kvm_enabled()) {
        spapr_dt_hypervisor(spapr, fdt);
    }

    /* Build memory reserve map */
    if (spapr->kernel_size) {
        _FDT((fdt_add_mem_rsv(fdt, KERNEL_LOAD_ADDR, spapr->kernel_size)));
    }
    if (spapr->initrd_size) {
        _FDT((fdt_add_mem_rsv(fdt, spapr->initrd_base, spapr->initrd_size)));
    }

    /* ibm,client-architecture-support updates */
    ret = spapr_dt_cas_updates(spapr, fdt, spapr->ov5_cas);
    if (ret < 0) {
        error_report("couldn't setup CAS properties fdt");
        exit(1);
    }

    return fdt;
}

/* Map a kernel ELF load address into the guest's kernel load window. */
static uint64_t translate_kernel_address(void *opaque, uint64_t addr)
{
    return (addr & 0x0fffffff) + KERNEL_LOAD_ADDR;
}

/*
 * TCG entry point for sc 1 (hypercall) emulation: dispatch to
 * spapr_hypercall() unless the guest issued it from problem state.
 */
static void emulate_spapr_hypercall(PPCVirtualHypervisor *vhyp,
                                    PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;

    /* The TCG path should also be holding the BQL at this point */
    g_assert(qemu_mutex_iothread_locked());

    if (msr_pr) {
        /* Hypercalls from user mode are rejected, not emulated */
        hcall_dprintf("Hypercall made with MSR[PR]=1\n");
        env->gpr[3] = H_PRIVILEGE;
    } else {
        env->gpr[3] = spapr_hypercall(cpu, env->gpr[3], &env->gpr[4]);
    }
}

/* vhyp callback: return the partition table entry (PATB) for this machine. */
static uint64_t spapr_get_patbe(PPCVirtualHypervisor *vhyp)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(vhyp);

12339861bb3eSSuraj Jitindar Singh return spapr->patb_entry; 12349861bb3eSSuraj Jitindar Singh } 12359861bb3eSSuraj Jitindar Singh 1236e6b8fd24SSamuel Mendoza-Jonas #define HPTE(_table, _i) (void *)(((uint64_t *)(_table)) + ((_i) * 2)) 1237e6b8fd24SSamuel Mendoza-Jonas #define HPTE_VALID(_hpte) (tswap64(*((uint64_t *)(_hpte))) & HPTE64_V_VALID) 1238e6b8fd24SSamuel Mendoza-Jonas #define HPTE_DIRTY(_hpte) (tswap64(*((uint64_t *)(_hpte))) & HPTE64_V_HPTE_DIRTY) 1239e6b8fd24SSamuel Mendoza-Jonas #define CLEAN_HPTE(_hpte) ((*(uint64_t *)(_hpte)) &= tswap64(~HPTE64_V_HPTE_DIRTY)) 1240e6b8fd24SSamuel Mendoza-Jonas #define DIRTY_HPTE(_hpte) ((*(uint64_t *)(_hpte)) |= tswap64(HPTE64_V_HPTE_DIRTY)) 1241e6b8fd24SSamuel Mendoza-Jonas 1242715c5407SDavid Gibson /* 1243715c5407SDavid Gibson * Get the fd to access the kernel htab, re-opening it if necessary 1244715c5407SDavid Gibson */ 1245715c5407SDavid Gibson static int get_htab_fd(sPAPRMachineState *spapr) 1246715c5407SDavid Gibson { 124714b0d748SGreg Kurz Error *local_err = NULL; 124814b0d748SGreg Kurz 1249715c5407SDavid Gibson if (spapr->htab_fd >= 0) { 1250715c5407SDavid Gibson return spapr->htab_fd; 1251715c5407SDavid Gibson } 1252715c5407SDavid Gibson 125314b0d748SGreg Kurz spapr->htab_fd = kvmppc_get_htab_fd(false, 0, &local_err); 1254715c5407SDavid Gibson if (spapr->htab_fd < 0) { 125514b0d748SGreg Kurz error_report_err(local_err); 1256715c5407SDavid Gibson } 1257715c5407SDavid Gibson 1258715c5407SDavid Gibson return spapr->htab_fd; 1259715c5407SDavid Gibson } 1260715c5407SDavid Gibson 1261b4db5413SSuraj Jitindar Singh void close_htab_fd(sPAPRMachineState *spapr) 1262715c5407SDavid Gibson { 1263715c5407SDavid Gibson if (spapr->htab_fd >= 0) { 1264715c5407SDavid Gibson close(spapr->htab_fd); 1265715c5407SDavid Gibson } 1266715c5407SDavid Gibson spapr->htab_fd = -1; 1267715c5407SDavid Gibson } 1268715c5407SDavid Gibson 1269e57ca75cSDavid Gibson static hwaddr spapr_hpt_mask(PPCVirtualHypervisor *vhyp) 1270e57ca75cSDavid 
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(vhyp);

    return HTAB_SIZE(spapr) / HASH_PTEG_SIZE_64 - 1;
}

/* PPCVirtualHypervisor hook: encode the userspace HPT address and size
 * (SDR1-style: low bits hold htab_shift - 18) for KVM PR.  Only meaningful
 * under KVM; returns 0 when the HPT is kernel-managed. */
static target_ulong spapr_encode_hpt_for_kvm_pr(PPCVirtualHypervisor *vhyp)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(vhyp);

    assert(kvm_enabled());

    if (!spapr->htab) {
        return 0;
    }

    return (target_ulong)(uintptr_t)spapr->htab | (spapr->htab_shift - 18);
}

/* PPCVirtualHypervisor hook: map @n HPTEs starting at index @ptex for
 * reading.  Must be paired with spapr_unmap_hptes(). */
static const ppc_hash_pte64_t *spapr_map_hptes(PPCVirtualHypervisor *vhyp,
                                               hwaddr ptex, int n)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(vhyp);
    hwaddr pte_offset = ptex * HASH_PTE_SIZE_64;

    if (!spapr->htab) {
        /*
         * HTAB is controlled by KVM. Fetch into temporary buffer
         */
        ppc_hash_pte64_t *hptes = g_malloc(n * HASH_PTE_SIZE_64);
        kvmppc_read_hptes(hptes, ptex, n);
        return hptes;
    }

    /*
     * HTAB is controlled by QEMU. Just point to the internally
     * accessible PTEG.
     */
    return (const ppc_hash_pte64_t *)(spapr->htab + pte_offset);
}

/* Release a mapping obtained from spapr_map_hptes().  Frees the temporary
 * buffer in the KVM-managed case; no-op for the QEMU-managed HPT. */
static void spapr_unmap_hptes(PPCVirtualHypervisor *vhyp,
                              const ppc_hash_pte64_t *hptes,
                              hwaddr ptex, int n)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(vhyp);

    if (!spapr->htab) {
        g_free((void *)hptes);
    }

    /* Nothing to do for qemu managed HPT */
}

/* PPCVirtualHypervisor hook: write one HPTE (both words) at index @ptex,
 * going through KVM when the kernel owns the HPT. */
static void spapr_store_hpte(PPCVirtualHypervisor *vhyp, hwaddr ptex,
                             uint64_t pte0, uint64_t pte1)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(vhyp);
    hwaddr offset = ptex * HASH_PTE_SIZE_64;

    if (!spapr->htab) {
        kvmppc_write_hpte(ptex, pte0, pte1);
    } else {
        stq_p(spapr->htab + offset, pte0);
        stq_p(spapr->htab + offset + HASH_PTE_SIZE_64 / 2, pte1);
    }
}

/* Pick an HPT order (log2 of size in bytes) for @ramsize bytes of RAM,
 * clamped to the architected minimum/maximum. */
int spapr_hpt_shift_for_ramsize(uint64_t ramsize)
{
    int shift;
The PAPR recommendation is actually 1/64 of RAM size, but 13448dfe8e7fSDavid Gibson * that's much more than is needed for Linux guests */ 13458dfe8e7fSDavid Gibson shift = ctz64(pow2ceil(ramsize)) - 7; 13468dfe8e7fSDavid Gibson shift = MAX(shift, 18); /* Minimum architected size */ 13478dfe8e7fSDavid Gibson shift = MIN(shift, 46); /* Maximum architected size */ 13488dfe8e7fSDavid Gibson return shift; 13498dfe8e7fSDavid Gibson } 13508dfe8e7fSDavid Gibson 135106ec79e8SBharata B Rao void spapr_free_hpt(sPAPRMachineState *spapr) 135206ec79e8SBharata B Rao { 135306ec79e8SBharata B Rao g_free(spapr->htab); 135406ec79e8SBharata B Rao spapr->htab = NULL; 135506ec79e8SBharata B Rao spapr->htab_shift = 0; 135606ec79e8SBharata B Rao close_htab_fd(spapr); 135706ec79e8SBharata B Rao } 135806ec79e8SBharata B Rao 13592772cf6bSDavid Gibson void spapr_reallocate_hpt(sPAPRMachineState *spapr, int shift, 1360c5f54f3eSDavid Gibson Error **errp) 136153018216SPaolo Bonzini { 1362c5f54f3eSDavid Gibson long rc; 136353018216SPaolo Bonzini 1364c5f54f3eSDavid Gibson /* Clean up any HPT info from a previous boot */ 136506ec79e8SBharata B Rao spapr_free_hpt(spapr); 136653018216SPaolo Bonzini 1367c5f54f3eSDavid Gibson rc = kvmppc_reset_htab(shift); 1368c5f54f3eSDavid Gibson if (rc < 0) { 1369c5f54f3eSDavid Gibson /* kernel-side HPT needed, but couldn't allocate one */ 1370c5f54f3eSDavid Gibson error_setg_errno(errp, errno, 1371c5f54f3eSDavid Gibson "Failed to allocate KVM HPT of order %d (try smaller maxmem?)", 1372c5f54f3eSDavid Gibson shift); 1373c5f54f3eSDavid Gibson /* This is almost certainly fatal, but if the caller really 1374c5f54f3eSDavid Gibson * wants to carry on with shift == 0, it's welcome to try */ 1375c5f54f3eSDavid Gibson } else if (rc > 0) { 1376c5f54f3eSDavid Gibson /* kernel-side HPT allocated */ 1377c5f54f3eSDavid Gibson if (rc != shift) { 1378c5f54f3eSDavid Gibson error_setg(errp, 1379c5f54f3eSDavid Gibson "Requested order %d HPT, but kernel allocated order %ld (try 
smaller maxmem?)", 1380c5f54f3eSDavid Gibson shift, rc); 13817735fedaSBharata B Rao } 13827735fedaSBharata B Rao 138353018216SPaolo Bonzini spapr->htab_shift = shift; 1384c18ad9a5SDavid Gibson spapr->htab = NULL; 1385b817772aSBharata B Rao } else { 1386c5f54f3eSDavid Gibson /* kernel-side HPT not needed, allocate in userspace instead */ 1387c5f54f3eSDavid Gibson size_t size = 1ULL << shift; 1388c5f54f3eSDavid Gibson int i; 138901a57972SSamuel Mendoza-Jonas 1390c5f54f3eSDavid Gibson spapr->htab = qemu_memalign(size, size); 1391c5f54f3eSDavid Gibson if (!spapr->htab) { 1392c5f54f3eSDavid Gibson error_setg_errno(errp, errno, 1393c5f54f3eSDavid Gibson "Could not allocate HPT of order %d", shift); 1394c5f54f3eSDavid Gibson return; 1395b817772aSBharata B Rao } 1396b817772aSBharata B Rao 1397c5f54f3eSDavid Gibson memset(spapr->htab, 0, size); 1398c5f54f3eSDavid Gibson spapr->htab_shift = shift; 1399b817772aSBharata B Rao 1400c5f54f3eSDavid Gibson for (i = 0; i < size / HASH_PTE_SIZE_64; i++) { 1401c5f54f3eSDavid Gibson DIRTY_HPTE(HPTE(spapr->htab, i)); 14027735fedaSBharata B Rao } 140353018216SPaolo Bonzini } 1404ee4d9eccSSuraj Jitindar Singh /* We're setting up a hash table, so that means we're not radix */ 1405ee4d9eccSSuraj Jitindar Singh spapr->patb_entry = 0; 140653018216SPaolo Bonzini } 140753018216SPaolo Bonzini 1408b4db5413SSuraj Jitindar Singh void spapr_setup_hpt_and_vrma(sPAPRMachineState *spapr) 1409b4db5413SSuraj Jitindar Singh { 14102772cf6bSDavid Gibson int hpt_shift; 14112772cf6bSDavid Gibson 14122772cf6bSDavid Gibson if ((spapr->resize_hpt == SPAPR_RESIZE_HPT_DISABLED) 14132772cf6bSDavid Gibson || (spapr->cas_reboot 14142772cf6bSDavid Gibson && !spapr_ovec_test(spapr->ov5_cas, OV5_HPT_RESIZE))) { 14152772cf6bSDavid Gibson hpt_shift = spapr_hpt_shift_for_ramsize(MACHINE(spapr)->maxram_size); 14162772cf6bSDavid Gibson } else { 1417768a20f3SDavid Gibson uint64_t current_ram_size; 1418768a20f3SDavid Gibson 1419768a20f3SDavid Gibson current_ram_size = 
MACHINE(spapr)->ram_size + get_plugged_memory_size(); 1420768a20f3SDavid Gibson hpt_shift = spapr_hpt_shift_for_ramsize(current_ram_size); 14212772cf6bSDavid Gibson } 14222772cf6bSDavid Gibson spapr_reallocate_hpt(spapr, hpt_shift, &error_fatal); 14232772cf6bSDavid Gibson 1424b4db5413SSuraj Jitindar Singh if (spapr->vrma_adjust) { 1425c86c1affSDaniel Henrique Barboza spapr->rma_size = kvmppc_rma_size(spapr_node0_size(MACHINE(spapr)), 1426b4db5413SSuraj Jitindar Singh spapr->htab_shift); 1427b4db5413SSuraj Jitindar Singh } 1428b4db5413SSuraj Jitindar Singh } 1429b4db5413SSuraj Jitindar Singh 14304f01a637SDavid Gibson static void find_unknown_sysbus_device(SysBusDevice *sbdev, void *opaque) 14319e3f9733SAlexander Graf { 14329e3f9733SAlexander Graf bool matched = false; 14339e3f9733SAlexander Graf 14349e3f9733SAlexander Graf if (object_dynamic_cast(OBJECT(sbdev), TYPE_SPAPR_PCI_HOST_BRIDGE)) { 14359e3f9733SAlexander Graf matched = true; 14369e3f9733SAlexander Graf } 14379e3f9733SAlexander Graf 14389e3f9733SAlexander Graf if (!matched) { 14399e3f9733SAlexander Graf error_report("Device %s is not supported by this machine yet.", 14409e3f9733SAlexander Graf qdev_fw_name(DEVICE(sbdev))); 14419e3f9733SAlexander Graf exit(1); 14429e3f9733SAlexander Graf } 14439e3f9733SAlexander Graf } 14449e3f9733SAlexander Graf 144582512483SGreg Kurz static int spapr_reset_drcs(Object *child, void *opaque) 144682512483SGreg Kurz { 144782512483SGreg Kurz sPAPRDRConnector *drc = 144882512483SGreg Kurz (sPAPRDRConnector *) object_dynamic_cast(child, 144982512483SGreg Kurz TYPE_SPAPR_DR_CONNECTOR); 145082512483SGreg Kurz 145182512483SGreg Kurz if (drc) { 145282512483SGreg Kurz spapr_drc_reset(drc); 145382512483SGreg Kurz } 145482512483SGreg Kurz 145582512483SGreg Kurz return 0; 145682512483SGreg Kurz } 145782512483SGreg Kurz 1458bcb5ce08SDavid Gibson static void spapr_machine_reset(void) 145953018216SPaolo Bonzini { 1460c5f54f3eSDavid Gibson MachineState *machine = MACHINE(qdev_get_machine()); 
    sPAPRMachineState *spapr = SPAPR_MACHINE(machine);
    PowerPCCPU *first_ppc_cpu;
    uint32_t rtas_limit;
    hwaddr rtas_addr, fdt_addr;
    void *fdt;
    int rc;

    /* Check for unknown sysbus devices */
    foreach_dynamic_sysbus_device(find_unknown_sysbus_device, NULL);

    spapr_caps_reset(spapr);

    first_ppc_cpu = POWERPC_CPU(first_cpu);
    if (kvm_enabled() && kvmppc_has_cap_mmu_radix() &&
        ppc_check_compat(first_ppc_cpu, CPU_POWERPC_LOGICAL_3_00, 0,
                         spapr->max_compat_pvr)) {
        /* If using KVM with radix mode available, VCPUs can be started
         * without a HPT because KVM will start them in radix mode.
         * Set the GR bit in PATB so that we know there is no HPT. */
        spapr->patb_entry = PATBE1_GR;
    } else {
        spapr_setup_hpt_and_vrma(spapr);
    }

    qemu_devices_reset();

    /* DRC reset may cause a device to be unplugged. This will cause troubles
     * if this device is used by another device (eg, a running vhost backend
     * will crash QEMU if the DIMM holding the vring goes away). To avoid such
     * situations, we reset DRCs after all devices have been reset.
     */
    object_child_foreach_recursive(object_get_root(), spapr_reset_drcs, NULL);

    spapr_clear_pending_events(spapr);

    /*
     * We place the device tree and RTAS just below either the top of the RMA,
     * or just below 2GB, whichever is lower, so that it can be
     * processed with 32-bit real mode code if necessary
     */
    rtas_limit = MIN(spapr->rma_size, RTAS_MAX_ADDR);
    rtas_addr = rtas_limit - RTAS_MAX_SIZE;
    fdt_addr = rtas_addr - FDT_MAX_SIZE;

    /* if this reset wasn't generated by CAS, we should reset our
     * negotiated options and start from scratch */
    if (!spapr->cas_reboot) {
        spapr_ovec_cleanup(spapr->ov5_cas);
        spapr->ov5_cas = spapr_ovec_new();

        ppc_set_compat(first_ppc_cpu, spapr->max_compat_pvr, &error_fatal);
    }

    fdt = spapr_build_fdt(spapr, rtas_addr, spapr->rtas_size);

    spapr_load_rtas(spapr, fdt, rtas_addr);

    rc = fdt_pack(fdt);

    /* Should only fail if we've built a corrupted tree */
    assert(rc == 0);

    if (fdt_totalsize(fdt) > FDT_MAX_SIZE) {
        error_report("FDT too big ! 0x%x bytes (max is 0x%x)",
                     fdt_totalsize(fdt), FDT_MAX_SIZE);
        exit(1);
    }

    /* Load the fdt */
    qemu_fdt_dumpdtb(fdt, fdt_totalsize(fdt));
    cpu_physical_memory_write(fdt_addr, fdt, fdt_totalsize(fdt));
    g_free(fdt);

    /* Set up the entry state */
    first_ppc_cpu->env.gpr[3] = fdt_addr;
    first_ppc_cpu->env.gpr[5] = 0;
    first_cpu->halted = 0;
    first_ppc_cpu->env.nip = SPAPR_ENTRY_POINT;

    spapr->cas_reboot = false;
}

/* Create the PAPR NVRAM device on the VIO bus, backed by the -pflash
 * drive when one is supplied. */
static void spapr_create_nvram(sPAPRMachineState *spapr)
{
    DeviceState *dev = qdev_create(&spapr->vio_bus->bus, "spapr-nvram");
    DriveInfo *dinfo = drive_get(IF_PFLASH, 0, 0);

    if (dinfo) {
        qdev_prop_set_drive(dev, "drive", blk_by_legacy_dinfo(dinfo),
                            &error_fatal);
    }

    qdev_init_nofail(dev);

    spapr->nvram = (struct sPAPRNVRAM *)dev;
}

/* Create and realize the embedded PAPR RTC as a child of the machine */
static void spapr_rtc_create(sPAPRMachineState *spapr)
{
    object_initialize(&spapr->rtc, sizeof(spapr->rtc), TYPE_SPAPR_RTC);
    object_property_add_child(OBJECT(spapr), "rtc", OBJECT(&spapr->rtc),
                              &error_fatal);
    object_property_set_bool(OBJECT(&spapr->rtc), true, "realized",
                             &error_fatal);
    /* Expose the RTC's "date" property on the machine as "rtc-time" */
    object_property_add_alias(OBJECT(spapr), "rtc-time", OBJECT(&spapr->rtc),
                              "date", &error_fatal);
}

/* Returns whether we want to use VGA or not */
static bool spapr_vga_init(PCIBus *pci_bus, Error **errp)
{
    switch (vga_interface_type) {
    case VGA_NONE:
        return false;
    case VGA_DEVICE:
        /* Device added explicitly elsewhere; nothing to create here */
        return true;
    case VGA_STD:
    case VGA_VIRTIO:
        return pci_vga_init(pci_bus) != NULL;
    default:
        error_setg(errp,
                   "Unsupported VGA mode, only -vga std or -vga virtio is supported");
        return false;
    }
}

/* VMState post_load hook for "spapr": fix up state after an incoming
 * migration.  Returns 0 on success or a negative errno. */
static int spapr_post_load(void *opaque, int version_id)
{
    sPAPRMachineState *spapr = (sPAPRMachineState *)opaque;
    int err = 0;

    /* For an emulated (non-KVM) interrupt controller, re-deliver any
     * interrupts that were pending when the source was stopped */
    if (!object_dynamic_cast(OBJECT(spapr->ics), TYPE_ICS_KVM)) {
        CPUState *cs;
        CPU_FOREACH(cs) {
            PowerPCCPU *cpu = POWERPC_CPU(cs);
            icp_resend(ICP(cpu->intc));
        }
    }

    /* In earlier versions, there was no separate qdev for the PAPR
     * RTC, so the RTC offset was stored directly in sPAPREnvironment.
     * So when migrating from those versions, poke the incoming offset
     * value into the RTC device */
    if (version_id < 3) {
        err = spapr_rtc_import_offset(&spapr->rtc, spapr->rtc_offset);
    }

    /* A non-zero PATB entry means the guest negotiated a v3 MMU config;
     * push it to KVM, failing the migration if the host can't honour it */
    if (kvm_enabled() && spapr->patb_entry) {
        PowerPCCPU *cpu = POWERPC_CPU(first_cpu);
        bool radix = !!(spapr->patb_entry & PATBE1_GR);
        bool gtse = !!(cpu->env.spr[SPR_LPCR] & LPCR_GTSE);

        err = kvmppc_configure_v3_mmu(cpu, radix, gtse, spapr->patb_entry);
        if (err) {
            error_report("Process table config unsupported by the host");
            return -EINVAL;
        }
    }

    return err;
}

/* VMState field_exists helper: true for pre-version-3 streams */
static bool version_before_3(void *opaque, int version_id)
{
    return version_id < 3;
}

/* Subsection .needed: only migrate the event log when it is non-empty */
static bool spapr_pending_events_needed(void *opaque)
{
    sPAPRMachineState *spapr = (sPAPRMachineState *)opaque;
    return !QTAILQ_EMPTY(&spapr->pending_events);
}

/* Migration format of one entry in the pending RTAS event log */
static const VMStateDescription vmstate_spapr_event_entry = {
    .name = "spapr_event_log_entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(summary, sPAPREventLogEntry),
        VMSTATE_UINT32(extended_length, sPAPREventLogEntry),
        VMSTATE_VBUFFER_ALLOC_UINT32(extended_log, sPAPREventLogEntry, 0,
                                     NULL, extended_length),
        VMSTATE_END_OF_LIST()
    },
};

/* Subsection carrying the queue of not-yet-delivered RTAS events */
static const VMStateDescription vmstate_spapr_pending_events = {
    .name = "spapr_pending_events",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = spapr_pending_events_needed,
    .fields = (VMStateField[]) {
        VMSTATE_QTAILQ_V(pending_events, sPAPRMachineState, 1,
                         vmstate_spapr_event_entry, sPAPREventLogEntry, next),
        VMSTATE_END_OF_LIST()
    },
};

/* Subsection .needed: decide whether the CAS-negotiated option vector
 * must be included in the migration stream (see comment below). */
static bool spapr_ov5_cas_needed(void *opaque)
{
    sPAPRMachineState *spapr = opaque;
    sPAPROptionVector *ov5_mask = spapr_ovec_new();
    sPAPROptionVector *ov5_legacy = spapr_ovec_new();
    sPAPROptionVector *ov5_removed = spapr_ovec_new();
    bool cas_needed;

    /* Prior to the introduction of sPAPROptionVector, we had two option
     * vectors we dealt with: OV5_FORM1_AFFINITY, and OV5_DRCONF_MEMORY.
     * Both of these options encode machine topology into the device-tree
     * in such a way that the now-booted OS should still be able to interact
     * appropriately with QEMU regardless of what options were actually
     * negotiated on the source side.
     *
     * As such, we can avoid migrating the CAS-negotiated options if these
     * are the only options available on the current machine/platform.
     * Since these are the only options available for pseries-2.7 and
     * earlier, this allows us to maintain old->new/new->old migration
     * compatibility.
     *
     * For QEMU 2.8+, there are additional CAS-negotiable options available
     * via default pseries-2.8 machines and explicit command-line parameters.
     * Some of these options, like OV5_HP_EVT, *do* require QEMU to be aware
     * of the actual CAS-negotiated values to continue working properly. For
     * example, availability of memory unplug depends on knowing whether
     * OV5_HP_EVT was negotiated via CAS.
     *
     * Thus, for any cases where the set of available CAS-negotiable
     * options extends beyond OV5_FORM1_AFFINITY and OV5_DRCONF_MEMORY, we
     * include the CAS-negotiated options in the migration stream.
     */
    spapr_ovec_set(ov5_mask, OV5_FORM1_AFFINITY);
    spapr_ovec_set(ov5_mask, OV5_DRCONF_MEMORY);

    /* spapr_ovec_diff returns true if bits were removed.  We avoid using
     * the mask itself since in the future it's possible "legacy" bits may be
     * removed via machine options, which could generate a false positive
     * that breaks migration.
     */
    spapr_ovec_intersect(ov5_legacy, spapr->ov5, ov5_mask);
    cas_needed = spapr_ovec_diff(ov5_removed, spapr->ov5, ov5_legacy);

    spapr_ovec_cleanup(ov5_mask);
    spapr_ovec_cleanup(ov5_legacy);
    spapr_ovec_cleanup(ov5_removed);

    return cas_needed;
}

/* Subsection carrying the CAS-negotiated option vector 5 */
static const VMStateDescription vmstate_spapr_ov5_cas = {
    .name = "spapr_option_vector_ov5_cas",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = spapr_ov5_cas_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_POINTER_V(ov5_cas, sPAPRMachineState, 1,
                                 vmstate_spapr_ovec, sPAPROptionVector),
        VMSTATE_END_OF_LIST()
    },
};

/* Subsection .needed: only migrate the PATB entry when it is set */
static bool spapr_patb_entry_needed(void *opaque)
{
    sPAPRMachineState *spapr = opaque;

    return !!spapr->patb_entry;
}

/* Subsection carrying the partition table entry */
static const VMStateDescription vmstate_spapr_patb_entry = {
    .name = "spapr_patb_entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = spapr_patb_entry_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(patb_entry, sPAPRMachineState),
        VMSTATE_END_OF_LIST()
    },
};

/* Top-level machine migration state */
static const VMStateDescription vmstate_spapr = {
    .name = "spapr",
    .version_id = 3,
    .minimum_version_id = 1,
    .post_load = spapr_post_load,
    .fields = (VMStateField[]) {
        /* used to be @next_irq */
        VMSTATE_UNUSED_BUFFER(version_before_3, 0, 4),

        /* RTC offset */
        VMSTATE_UINT64_TEST(rtc_offset, sPAPRMachineState, version_before_3),

        VMSTATE_PPC_TIMEBASE_V(tb, sPAPRMachineState, 2),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_spapr_ov5_cas,
        &vmstate_spapr_patb_entry,
        &vmstate_spapr_pending_events,
        NULL
    }
};

/* SaveVMHandlers .save_setup for the HPT: write the iteration header and
 * initialise the per-migration save cursor.  Always returns 0. */
static int htab_save_setup(QEMUFile *f, void *opaque)
{
    sPAPRMachineState *spapr = opaque;

    /* "Iteration" header */
    if (!spapr->htab_shift) {
        /* No HPT at all: flag it with -1 */
        qemu_put_be32(f, -1);
    } else {
        qemu_put_be32(f, spapr->htab_shift);
    }

    if (spapr->htab) {
        spapr->htab_save_index = 0;
        spapr->htab_first_pass = true;
    } else {
        if (spapr->htab_shift) {
            /* HPT exists but is owned by the kernel (KVM) */
            assert(kvm_enabled());
        }
    }


    return 0;
}

/* Emit one HPT chunk: header (start index, valid count, invalid count)
 * followed by the raw bytes of the @n_valid valid entries. */
static void htab_save_chunk(QEMUFile *f, sPAPRMachineState *spapr,
                            int chunkstart, int n_valid, int n_invalid)
{
    qemu_put_be32(f, chunkstart);
    qemu_put_be16(f, n_valid);
    qemu_put_be16(f, n_invalid);
    qemu_put_buffer(f, HPTE(spapr->htab, chunkstart),
                    HASH_PTE_SIZE_64 * n_valid);
}

/* Emit the all-zero chunk header that terminates the HPT stream */
static void htab_save_end_marker(QEMUFile *f)
{
    qemu_put_be32(f, 0);
    qemu_put_be16(f, 0);
    qemu_put_be16(f, 0);
}

/* First pass over a QEMU-managed HPT: stream every valid entry, clearing
 * the dirty flags as we go.  Stops early when @max_ns elapses (unless
 * max_ns == -1) or the migration rate limit is hit; the cursor is kept in
 * spapr->htab_save_index so later calls resume where we left off. */
static void htab_save_first_pass(QEMUFile *f, sPAPRMachineState *spapr,
                                 int64_t max_ns)
{
    bool has_timeout = max_ns != -1;
    int htabslots = HTAB_SIZE(spapr) / HASH_PTE_SIZE_64;
    int index = spapr->htab_save_index;
    int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    assert(spapr->htab_first_pass);

    do {
        int chunkstart;

        /* Consume invalid HPTEs */
        while ((index < htabslots)
               && !HPTE_VALID(HPTE(spapr->htab, index))) {
            CLEAN_HPTE(HPTE(spapr->htab, index));
            index++;
        }

        /* Consume valid HPTEs */
        chunkstart = index;
        while ((index < htabslots) && (index - chunkstart < USHRT_MAX)
               && HPTE_VALID(HPTE(spapr->htab, index))) {
            CLEAN_HPTE(HPTE(spapr->htab, index));
            index++;
        }

        if (index > chunkstart) {
            int n_valid = index - chunkstart;

            htab_save_chunk(f, spapr, chunkstart, n_valid, 0);

            if (has_timeout &&
                (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) > max_ns) {
                break;
            }
        }
    } while ((index < htabslots) && !qemu_file_rate_limit(f));

    if (index >= htabslots) {
        /* Reached the end of the table: switch to the dirty-only passes */
        assert(index == htabslots);
        index = 0;
        spapr->htab_first_pass = false;
    }
    spapr->htab_save_index = index;
}

/* Later passes: stream only dirty entries (valid and invalidated).
 * max_ns < 0 means "final pass, ignore time and rate limits".
 * NOTE: the body of this function continues past the end of this chunk. */
static int htab_save_later_pass(QEMUFile *f, sPAPRMachineState *spapr,
                                int64_t max_ns)
{
    bool final = max_ns < 0;
18554be21d56SDavid Gibson int htabslots = HTAB_SIZE(spapr) / HASH_PTE_SIZE_64;
18564be21d56SDavid Gibson int examined = 0, sent = 0;
18574be21d56SDavid Gibson int index = spapr->htab_save_index;
1858bc72ad67SAlex Bligh int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
18594be21d56SDavid Gibson
18604be21d56SDavid Gibson assert(!spapr->htab_first_pass);
18614be21d56SDavid Gibson
18624be21d56SDavid Gibson do {
/* A chunk = a run of dirty+valid entries followed by dirty+invalid ones. */
18634be21d56SDavid Gibson int chunkstart, invalidstart;
18644be21d56SDavid Gibson
18654be21d56SDavid Gibson /* Consume non-dirty HPTEs */
18664be21d56SDavid Gibson while ((index < htabslots)
18674be21d56SDavid Gibson && !HPTE_DIRTY(HPTE(spapr->htab, index))) {
18684be21d56SDavid Gibson index++;
18694be21d56SDavid Gibson examined++;
18704be21d56SDavid Gibson }
18714be21d56SDavid Gibson
18724be21d56SDavid Gibson chunkstart = index;
18734be21d56SDavid Gibson /* Consume valid dirty HPTEs */
1874338c25b6SSamuel Mendoza-Jonas while ((index < htabslots) && (index - chunkstart < USHRT_MAX)
18754be21d56SDavid Gibson && HPTE_DIRTY(HPTE(spapr->htab, index))
18764be21d56SDavid Gibson && HPTE_VALID(HPTE(spapr->htab, index))) {
18774be21d56SDavid Gibson CLEAN_HPTE(HPTE(spapr->htab, index));
18784be21d56SDavid Gibson index++;
18794be21d56SDavid Gibson examined++;
18804be21d56SDavid Gibson }
18814be21d56SDavid Gibson
18824be21d56SDavid Gibson invalidstart = index;
18834be21d56SDavid Gibson /* Consume invalid dirty HPTEs */
1884338c25b6SSamuel Mendoza-Jonas while ((index < htabslots) && (index - invalidstart < USHRT_MAX)
18854be21d56SDavid Gibson && HPTE_DIRTY(HPTE(spapr->htab, index))
18864be21d56SDavid Gibson && !HPTE_VALID(HPTE(spapr->htab, index))) {
18874be21d56SDavid Gibson CLEAN_HPTE(HPTE(spapr->htab, index));
18884be21d56SDavid Gibson index++;
18894be21d56SDavid Gibson examined++;
18904be21d56SDavid Gibson }
18914be21d56SDavid Gibson
18924be21d56SDavid Gibson if (index > chunkstart) {
18934be21d56SDavid Gibson int n_valid = invalidstart - chunkstart;
18944be21d56SDavid Gibson int n_invalid = index - invalidstart;
18954be21d56SDavid Gibson
1896332f7721SGreg Kurz htab_save_chunk(f, spapr, chunkstart, n_valid, n_invalid);
18974be21d56SDavid Gibson sent += index - chunkstart;
18984be21d56SDavid Gibson
/* On a bounded pass, stop once the time budget is exhausted. */
1899bc72ad67SAlex Bligh if (!final && (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) > max_ns) {
19004be21d56SDavid Gibson break;
19014be21d56SDavid Gibson }
19024be21d56SDavid Gibson }
19034be21d56SDavid Gibson
19044be21d56SDavid Gibson if (examined >= htabslots) {
19054be21d56SDavid Gibson break;
19064be21d56SDavid Gibson }
19074be21d56SDavid Gibson
/* Wrap around: the scan may have started mid-table. */
19084be21d56SDavid Gibson if (index >= htabslots) {
19094be21d56SDavid Gibson assert(index == htabslots);
19104be21d56SDavid Gibson index = 0;
19114be21d56SDavid Gibson }
/* The final pass ignores rate limiting so the table is always drained. */
19124be21d56SDavid Gibson } while ((examined < htabslots) && (!qemu_file_rate_limit(f) || final));
19134be21d56SDavid Gibson
19144be21d56SDavid Gibson if (index >= htabslots) {
19154be21d56SDavid Gibson assert(index == htabslots);
19164be21d56SDavid Gibson index = 0;
19174be21d56SDavid Gibson }
19184be21d56SDavid Gibson
19194be21d56SDavid Gibson spapr->htab_save_index = index;
19204be21d56SDavid Gibson
/* 1 => a full scan sent nothing: HPT migration is complete. */
1921e68cb8b4SAlexey Kardashevskiy return (examined >= htabslots) && (sent == 0) ?
1 : 0;
19224be21d56SDavid Gibson }
19234be21d56SDavid Gibson
1924e68cb8b4SAlexey Kardashevskiy #define MAX_ITERATION_NS 5000000 /* 5 ms */
1925e68cb8b4SAlexey Kardashevskiy #define MAX_KVM_BUF_SIZE 2048
1926e68cb8b4SAlexey Kardashevskiy
/*
 * SaveVMHandlers .save_live_iterate callback: emit one time-bounded
 * instalment of the HPT.  Returns 1 when there is nothing (more) to
 * send, 0 to be called again, negative on error.
 */
19274be21d56SDavid Gibson static int htab_save_iterate(QEMUFile *f, void *opaque)
19284be21d56SDavid Gibson {
192928e02042SDavid Gibson sPAPRMachineState *spapr = opaque;
1930715c5407SDavid Gibson int fd;
1931e68cb8b4SAlexey Kardashevskiy int rc = 0;
19324be21d56SDavid Gibson
19334be21d56SDavid Gibson /* Iteration header */
/* htab_shift == 0 means there is no hash table to migrate. */
19343a384297SBharata B Rao if (!spapr->htab_shift) {
19353a384297SBharata B Rao qemu_put_be32(f, -1);
1936e8cd4247SLaurent Vivier return 1;
19373a384297SBharata B Rao } else {
19384be21d56SDavid Gibson qemu_put_be32(f, 0);
19393a384297SBharata B Rao }
19404be21d56SDavid Gibson
/* KVM-owned table: let the kernel stream it through the htab fd. */
1941e68cb8b4SAlexey Kardashevskiy if (!spapr->htab) {
1942e68cb8b4SAlexey Kardashevskiy assert(kvm_enabled());
1943e68cb8b4SAlexey Kardashevskiy
1944715c5407SDavid Gibson fd = get_htab_fd(spapr);
1945715c5407SDavid Gibson if (fd < 0) {
1946715c5407SDavid Gibson return fd;
194701a57972SSamuel Mendoza-Jonas }
194801a57972SSamuel Mendoza-Jonas
1949715c5407SDavid Gibson rc = kvmppc_save_htab(f, fd, MAX_KVM_BUF_SIZE, MAX_ITERATION_NS);
1950e68cb8b4SAlexey Kardashevskiy if (rc < 0) {
1951e68cb8b4SAlexey Kardashevskiy return rc;
1952e68cb8b4SAlexey Kardashevskiy }
1953e68cb8b4SAlexey Kardashevskiy } else if (spapr->htab_first_pass) {
19544be21d56SDavid Gibson htab_save_first_pass(f, spapr, MAX_ITERATION_NS);
19554be21d56SDavid Gibson } else {
1956e68cb8b4SAlexey Kardashevskiy rc = htab_save_later_pass(f, spapr, MAX_ITERATION_NS);
19574be21d56SDavid Gibson }
19584be21d56SDavid Gibson
1959332f7721SGreg Kurz htab_save_end_marker(f);
19604be21d56SDavid Gibson
1961e68cb8b4SAlexey Kardashevskiy return rc;
19624be21d56SDavid Gibson }
19634be21d56SDavid Gibson
/*
 * SaveVMHandlers .save_live_complete_precopy callback: flush the whole
 * remaining HPT with no time budget (max_ns == -1).
 */
19644be21d56SDavid Gibson static int htab_save_complete(QEMUFile *f, void *opaque)
19654be21d56SDavid Gibson {
196628e02042SDavid Gibson sPAPRMachineState *spapr = opaque;
1967715c5407SDavid Gibson int fd;
19684be21d56SDavid Gibson
19694be21d56SDavid Gibson /* Iteration header */
19703a384297SBharata B Rao if (!spapr->htab_shift) {
19713a384297SBharata B Rao qemu_put_be32(f, -1);
19723a384297SBharata B Rao return 0;
19733a384297SBharata B Rao } else {
19744be21d56SDavid Gibson qemu_put_be32(f, 0);
19753a384297SBharata B Rao }
19764be21d56SDavid Gibson
1977e68cb8b4SAlexey Kardashevskiy if (!spapr->htab) {
1978e68cb8b4SAlexey Kardashevskiy int rc;
1979e68cb8b4SAlexey Kardashevskiy
1980e68cb8b4SAlexey Kardashevskiy assert(kvm_enabled());
1981e68cb8b4SAlexey Kardashevskiy
1982715c5407SDavid Gibson fd = get_htab_fd(spapr);
1983715c5407SDavid Gibson if (fd < 0) {
1984715c5407SDavid Gibson return fd;
198501a57972SSamuel Mendoza-Jonas }
198601a57972SSamuel Mendoza-Jonas
/* -1: no time limit, drain everything KVM still holds. */
1987715c5407SDavid Gibson rc = kvmppc_save_htab(f, fd, MAX_KVM_BUF_SIZE, -1);
1988e68cb8b4SAlexey Kardashevskiy if (rc < 0) {
1989e68cb8b4SAlexey Kardashevskiy return rc;
1990e68cb8b4SAlexey Kardashevskiy }
1991e68cb8b4SAlexey Kardashevskiy } else {
/* If the first full sweep never finished, finish it before the dirty pass. */
1992378bc217SDavid Gibson if (spapr->htab_first_pass) {
1993378bc217SDavid Gibson htab_save_first_pass(f, spapr, -1);
1994378bc217SDavid Gibson }
19954be21d56SDavid Gibson htab_save_later_pass(f, spapr, -1);
1996e68cb8b4SAlexey Kardashevskiy }
19974be21d56SDavid Gibson
19984be21d56SDavid Gibson /* End marker */
1999332f7721SGreg Kurz htab_save_end_marker(f);
20004be21d56SDavid Gibson
20014be21d56SDavid Gibson return 0;
20024be21d56SDavid Gibson }
20034be21d56SDavid Gibson
/*
 * Incoming-migration counterpart: parse the stream produced above and
 * rebuild the HPT, either directly into the userspace table or through
 * the KVM htab fd.
 */
20044be21d56SDavid Gibson static int htab_load(QEMUFile *f, void *opaque, int version_id)
20054be21d56SDavid Gibson {
200628e02042SDavid Gibson sPAPRMachineState *spapr = opaque;
20074be21d56SDavid Gibson uint32_t section_hdr;
2008e68cb8b4SAlexey Kardashevskiy int fd = -1;
200914b0d748SGreg Kurz Error *local_err = NULL;
20104be21d56SDavid Gibson
20114be21d56SDavid Gibson if
(version_id < 1 || version_id > 1) {
201298a5d100SDavid Gibson error_report("htab_load() bad version");
20134be21d56SDavid Gibson return -EINVAL;
20144be21d56SDavid Gibson }
20154be21d56SDavid Gibson
20164be21d56SDavid Gibson section_hdr = qemu_get_be32(f);
20174be21d56SDavid Gibson
/* -1 header: the source had no HPT; free ours too. */
20183a384297SBharata B Rao if (section_hdr == -1) {
20193a384297SBharata B Rao spapr_free_hpt(spapr);
20203a384297SBharata B Rao return 0;
20213a384297SBharata B Rao }
20223a384297SBharata B Rao
20234be21d56SDavid Gibson if (section_hdr) {
2024c5f54f3eSDavid Gibson /* First section gives the htab size */
2025c5f54f3eSDavid Gibson spapr_reallocate_hpt(spapr, section_hdr, &local_err);
2026c5f54f3eSDavid Gibson if (local_err) {
2027c5f54f3eSDavid Gibson error_report_err(local_err);
20284be21d56SDavid Gibson return -EINVAL;
20294be21d56SDavid Gibson }
20304be21d56SDavid Gibson return 0;
20314be21d56SDavid Gibson }
20324be21d56SDavid Gibson
/* KVM-owned table: open a write fd for loading entries into the kernel. */
2033e68cb8b4SAlexey Kardashevskiy if (!spapr->htab) {
2034e68cb8b4SAlexey Kardashevskiy assert(kvm_enabled());
2035e68cb8b4SAlexey Kardashevskiy
203614b0d748SGreg Kurz fd = kvmppc_get_htab_fd(true, 0, &local_err);
2037e68cb8b4SAlexey Kardashevskiy if (fd < 0) {
203814b0d748SGreg Kurz error_report_err(local_err);
203982be8e73SGreg Kurz return fd;
2040e68cb8b4SAlexey Kardashevskiy }
2041e68cb8b4SAlexey Kardashevskiy }
2042e68cb8b4SAlexey Kardashevskiy
/* Read (index, n_valid, n_invalid, data...) records until the end marker. */
20434be21d56SDavid Gibson while (true) {
20444be21d56SDavid Gibson uint32_t index;
20454be21d56SDavid Gibson uint16_t n_valid, n_invalid;
20464be21d56SDavid Gibson
20474be21d56SDavid Gibson index = qemu_get_be32(f);
20484be21d56SDavid Gibson n_valid = qemu_get_be16(f);
20494be21d56SDavid Gibson n_invalid = qemu_get_be16(f);
20504be21d56SDavid Gibson
20514be21d56SDavid Gibson if ((index == 0) && (n_valid == 0) && (n_invalid == 0)) {
20524be21d56SDavid Gibson /* End of Stream */
20534be21d56SDavid Gibson break;
20544be21d56SDavid Gibson }
20554be21d56SDavid Gibson
/* Bounds-check the record against the (possibly reallocated) table. */
2056e68cb8b4SAlexey Kardashevskiy if ((index +
n_valid + n_invalid) >
20574be21d56SDavid Gibson (HTAB_SIZE(spapr) / HASH_PTE_SIZE_64)) {
20584be21d56SDavid Gibson /* Bad index in stream */
205998a5d100SDavid Gibson error_report(
206098a5d100SDavid Gibson "htab_load() bad index %d (%hd+%hd entries) in htab stream (htab_shift=%d)",
206198a5d100SDavid Gibson index, n_valid, n_invalid, spapr->htab_shift);
20624be21d56SDavid Gibson return -EINVAL;
20634be21d56SDavid Gibson }
20644be21d56SDavid Gibson
2065e68cb8b4SAlexey Kardashevskiy if (spapr->htab) {
/* Userspace table: copy valid entries in, zero the invalid run. */
20664be21d56SDavid Gibson if (n_valid) {
20674be21d56SDavid Gibson qemu_get_buffer(f, HPTE(spapr->htab, index),
20684be21d56SDavid Gibson HASH_PTE_SIZE_64 * n_valid);
20694be21d56SDavid Gibson }
20704be21d56SDavid Gibson if (n_invalid) {
20714be21d56SDavid Gibson memset(HPTE(spapr->htab, index + n_valid), 0,
20724be21d56SDavid Gibson HASH_PTE_SIZE_64 * n_invalid);
20734be21d56SDavid Gibson }
2074e68cb8b4SAlexey Kardashevskiy } else {
2075e68cb8b4SAlexey Kardashevskiy int rc;
2076e68cb8b4SAlexey Kardashevskiy
2077e68cb8b4SAlexey Kardashevskiy assert(fd >= 0);
2078e68cb8b4SAlexey Kardashevskiy
2079e68cb8b4SAlexey Kardashevskiy rc = kvmppc_load_htab_chunk(f, fd, index, n_valid, n_invalid);
2080e68cb8b4SAlexey Kardashevskiy if (rc < 0) {
2081e68cb8b4SAlexey Kardashevskiy return rc;
2082e68cb8b4SAlexey Kardashevskiy }
2083e68cb8b4SAlexey Kardashevskiy }
2084e68cb8b4SAlexey Kardashevskiy }
2085e68cb8b4SAlexey Kardashevskiy
2086e68cb8b4SAlexey Kardashevskiy if (!spapr->htab) {
2087e68cb8b4SAlexey Kardashevskiy assert(fd >= 0);
2088e68cb8b4SAlexey Kardashevskiy close(fd);
20894be21d56SDavid Gibson }
20904be21d56SDavid Gibson
20914be21d56SDavid Gibson return 0;
20924be21d56SDavid Gibson }
20934be21d56SDavid Gibson
/* SaveVMHandlers .save_cleanup callback: release the cached KVM htab fd. */
209470f794fcSJuan Quintela static void htab_save_cleanup(void *opaque)
2095c573fc03SThomas Huth {
2096c573fc03SThomas Huth sPAPRMachineState *spapr = opaque;
2097c573fc03SThomas Huth
2098c573fc03SThomas Huth close_htab_fd(spapr);
2099c573fc03SThomas Huth }
2100c573fc03SThomas Huth
/* Live-migration hooks for the hash page table ("htab") stream. */
21014be21d56SDavid Gibson static SaveVMHandlers savevm_htab_handlers = {
21029907e842SJuan Quintela .save_setup = htab_save_setup,
21034be21d56SDavid Gibson .save_live_iterate = htab_save_iterate,
2104a3e06c3dSDr. David Alan Gilbert .save_live_complete_precopy = htab_save_complete,
210570f794fcSJuan Quintela .save_cleanup = htab_save_cleanup,
21064be21d56SDavid Gibson .load_state = htab_load,
21074be21d56SDavid Gibson };
21084be21d56SDavid Gibson
/* boot_set callback: record a newly selected boot device order. */
21095b2128d2SAlexander Graf static void spapr_boot_set(void *opaque, const char *boot_device,
21105b2128d2SAlexander Graf Error **errp)
21115b2128d2SAlexander Graf {
2112c86c1affSDaniel Henrique Barboza MachineState *machine = MACHINE(opaque);
21135b2128d2SAlexander Graf machine->boot_order = g_strdup(boot_device);
21145b2128d2SAlexander Graf }
21155b2128d2SAlexander Graf
/*
 * Create one DR connector per hotpluggable LMB (logical memory block),
 * covering the range between initial RAM size and maxmem.
 */
2116224245bfSDavid Gibson static void spapr_create_lmb_dr_connectors(sPAPRMachineState *spapr)
2117224245bfSDavid Gibson {
2118224245bfSDavid Gibson MachineState *machine = MACHINE(spapr);
2119224245bfSDavid Gibson uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE;
2120e8f986fcSBharata B Rao uint32_t nr_lmbs = (machine->maxram_size - machine->ram_size)/lmb_size;
2121224245bfSDavid Gibson int i;
2122224245bfSDavid Gibson
2123224245bfSDavid Gibson for (i = 0; i < nr_lmbs; i++) {
2124224245bfSDavid Gibson uint64_t addr;
2125224245bfSDavid Gibson
/* DRC index for an LMB is its LMB number within guest physical memory. */
2126e8f986fcSBharata B Rao addr = i * lmb_size + spapr->hotplug_memory.base;
21276caf3ac6SDavid Gibson spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_LMB,
2128224245bfSDavid Gibson addr / lmb_size);
2129224245bfSDavid Gibson }
2130224245bfSDavid Gibson }
2131224245bfSDavid Gibson
2132224245bfSDavid Gibson /*
2133224245bfSDavid Gibson * If RAM size, maxmem size and individual node mem sizes aren't aligned
2134224245bfSDavid Gibson * to SPAPR_MEMORY_BLOCK_SIZE(256MB), then refuse to start the guest
2135224245bfSDavid Gibson * since we can't support such unaligned sizes with DRCONF_MEMORY.
2136224245bfSDavid Gibson */ 21377c150d6fSDavid Gibson static void spapr_validate_node_memory(MachineState *machine, Error **errp) 2138224245bfSDavid Gibson { 2139224245bfSDavid Gibson int i; 2140224245bfSDavid Gibson 21417c150d6fSDavid Gibson if (machine->ram_size % SPAPR_MEMORY_BLOCK_SIZE) { 21427c150d6fSDavid Gibson error_setg(errp, "Memory size 0x" RAM_ADDR_FMT 21437c150d6fSDavid Gibson " is not aligned to %llu MiB", 21447c150d6fSDavid Gibson machine->ram_size, 2145224245bfSDavid Gibson SPAPR_MEMORY_BLOCK_SIZE / M_BYTE); 21467c150d6fSDavid Gibson return; 21477c150d6fSDavid Gibson } 21487c150d6fSDavid Gibson 21497c150d6fSDavid Gibson if (machine->maxram_size % SPAPR_MEMORY_BLOCK_SIZE) { 21507c150d6fSDavid Gibson error_setg(errp, "Maximum memory size 0x" RAM_ADDR_FMT 21517c150d6fSDavid Gibson " is not aligned to %llu MiB", 21527c150d6fSDavid Gibson machine->ram_size, 21537c150d6fSDavid Gibson SPAPR_MEMORY_BLOCK_SIZE / M_BYTE); 21547c150d6fSDavid Gibson return; 2155224245bfSDavid Gibson } 2156224245bfSDavid Gibson 2157224245bfSDavid Gibson for (i = 0; i < nb_numa_nodes; i++) { 2158224245bfSDavid Gibson if (numa_info[i].node_mem % SPAPR_MEMORY_BLOCK_SIZE) { 21597c150d6fSDavid Gibson error_setg(errp, 21607c150d6fSDavid Gibson "Node %d memory size 0x%" PRIx64 21617c150d6fSDavid Gibson " is not aligned to %llu MiB", 21627c150d6fSDavid Gibson i, numa_info[i].node_mem, 2163224245bfSDavid Gibson SPAPR_MEMORY_BLOCK_SIZE / M_BYTE); 21647c150d6fSDavid Gibson return; 2165224245bfSDavid Gibson } 2166224245bfSDavid Gibson } 2167224245bfSDavid Gibson } 2168224245bfSDavid Gibson 2169535455fdSIgor Mammedov /* find cpu slot in machine->possible_cpus by core_id */ 2170535455fdSIgor Mammedov static CPUArchId *spapr_find_cpu_slot(MachineState *ms, uint32_t id, int *idx) 2171535455fdSIgor Mammedov { 2172535455fdSIgor Mammedov int index = id / smp_threads; 2173535455fdSIgor Mammedov 2174535455fdSIgor Mammedov if (index >= ms->possible_cpus->len) { 2175535455fdSIgor Mammedov return 
NULL;
2176535455fdSIgor Mammedov }
2177535455fdSIgor Mammedov if (idx) {
2178535455fdSIgor Mammedov *idx = index;
2179535455fdSIgor Mammedov }
2180535455fdSIgor Mammedov return &ms->possible_cpus->cpus[index];
2181535455fdSIgor Mammedov }
2182535455fdSIgor Mammedov
/*
 * Instantiate the boot-time CPU cores and, when the machine supports
 * hotplug, one DR connector per possible core.
 */
21830c86d0fdSDavid Gibson static void spapr_init_cpus(sPAPRMachineState *spapr)
21840c86d0fdSDavid Gibson {
21850c86d0fdSDavid Gibson MachineState *machine = MACHINE(spapr);
21860c86d0fdSDavid Gibson MachineClass *mc = MACHINE_GET_CLASS(machine);
21872e9c10ebSIgor Mammedov const char *type = spapr_get_cpu_core_type(machine->cpu_type);
21880c86d0fdSDavid Gibson int smt = kvmppc_smt_threads();
2189535455fdSIgor Mammedov const CPUArchIdList *possible_cpus;
2190535455fdSIgor Mammedov int boot_cores_nr = smp_cpus / smp_threads;
21910c86d0fdSDavid Gibson int i;
21920c86d0fdSDavid Gibson
21930c86d0fdSDavid Gibson if (!type) {
21940c86d0fdSDavid Gibson error_report("Unable to find sPAPR CPU Core definition");
21950c86d0fdSDavid Gibson exit(1);
21960c86d0fdSDavid Gibson }
21970c86d0fdSDavid Gibson
2198535455fdSIgor Mammedov possible_cpus = mc->possible_cpu_arch_ids(machine);
/* With hotplug, CPU counts must be whole cores; without it, no spare slots. */
2199c5514d0eSIgor Mammedov if (mc->has_hotpluggable_cpus) {
22000c86d0fdSDavid Gibson if (smp_cpus % smp_threads) {
22010c86d0fdSDavid Gibson error_report("smp_cpus (%u) must be multiple of threads (%u)",
22020c86d0fdSDavid Gibson smp_cpus, smp_threads);
22030c86d0fdSDavid Gibson exit(1);
22040c86d0fdSDavid Gibson }
22050c86d0fdSDavid Gibson if (max_cpus % smp_threads) {
22060c86d0fdSDavid Gibson error_report("max_cpus (%u) must be multiple of threads (%u)",
22070c86d0fdSDavid Gibson max_cpus, smp_threads);
22080c86d0fdSDavid Gibson exit(1);
22090c86d0fdSDavid Gibson }
22100c86d0fdSDavid Gibson } else {
22110c86d0fdSDavid Gibson if (max_cpus != smp_cpus) {
22120c86d0fdSDavid Gibson error_report("This machine version does not support CPU hotplug");
22130c86d0fdSDavid Gibson exit(1);
22140c86d0fdSDavid Gibson }
2215535455fdSIgor Mammedov
boot_cores_nr = possible_cpus->len;
22160c86d0fdSDavid Gibson }
22170c86d0fdSDavid Gibson
2218535455fdSIgor Mammedov for (i = 0; i < possible_cpus->len; i++) {
22190c86d0fdSDavid Gibson int core_id = i * smp_threads;
22200c86d0fdSDavid Gibson
2221c5514d0eSIgor Mammedov if (mc->has_hotpluggable_cpus) {
22222d335818SDavid Gibson spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_CPU,
22230c86d0fdSDavid Gibson (core_id / smp_threads) * smt);
22240c86d0fdSDavid Gibson }
22250c86d0fdSDavid Gibson
/* Only the first boot_cores_nr slots get a core object at startup. */
2226535455fdSIgor Mammedov if (i < boot_cores_nr) {
22270c86d0fdSDavid Gibson Object *core = object_new(type);
22280c86d0fdSDavid Gibson int nr_threads = smp_threads;
22290c86d0fdSDavid Gibson
22300c86d0fdSDavid Gibson /* Handle the partially filled core for older machine types */
22310c86d0fdSDavid Gibson if ((i + 1) * smp_threads >= smp_cpus) {
22320c86d0fdSDavid Gibson nr_threads = smp_cpus - i * smp_threads;
22330c86d0fdSDavid Gibson }
22340c86d0fdSDavid Gibson
22350c86d0fdSDavid Gibson object_property_set_int(core, nr_threads, "nr-threads",
22360c86d0fdSDavid Gibson &error_fatal);
22370c86d0fdSDavid Gibson object_property_set_int(core, core_id, CPU_CORE_PROP_CORE_ID,
22380c86d0fdSDavid Gibson &error_fatal);
22390c86d0fdSDavid Gibson object_property_set_bool(core, true, "realized", &error_fatal);
22400c86d0fdSDavid Gibson }
22410c86d0fdSDavid Gibson }
22420c86d0fdSDavid Gibson }
22430c86d0fdSDavid Gibson
/*
 * Validate smp_threads and choose the VSMT (virtual SMT) mode,
 * configuring KVM to match when necessary.  Failures are reported
 * through errp.
 */
2244fa98fbfcSSam Bobroff static void spapr_set_vsmt_mode(sPAPRMachineState *spapr, Error **errp)
2245fa98fbfcSSam Bobroff {
2246fa98fbfcSSam Bobroff Error *local_err = NULL;
2247fa98fbfcSSam Bobroff bool vsmt_user = !!spapr->vsmt;
2248fa98fbfcSSam Bobroff int kvm_smt = kvmppc_smt_threads();
2249fa98fbfcSSam Bobroff int ret;
2250fa98fbfcSSam Bobroff
2251fa98fbfcSSam Bobroff if (!kvm_enabled() && (smp_threads > 1)) {
2252fa98fbfcSSam Bobroff error_setg(&local_err, "TCG cannot support more than 1 thread/core "
2253fa98fbfcSSam Bobroff "on a pseries machine");
2254fa98fbfcSSam Bobroff goto out;
2255fa98fbfcSSam Bobroff }
2256fa98fbfcSSam Bobroff if (!is_power_of_2(smp_threads)) {
2257fa98fbfcSSam Bobroff error_setg(&local_err, "Cannot support %d threads/core on a pseries "
2258fa98fbfcSSam Bobroff "machine because it must be a power of 2", smp_threads);
2259fa98fbfcSSam Bobroff goto out;
2260fa98fbfcSSam Bobroff }
2261fa98fbfcSSam Bobroff
2262fa98fbfcSSam Bobroff /* Detemine the VSMT mode to use: */
/* A user-specified VSMT must be able to contain a full core's threads. */
2263fa98fbfcSSam Bobroff if (vsmt_user) {
2264fa98fbfcSSam Bobroff if (spapr->vsmt < smp_threads) {
2265fa98fbfcSSam Bobroff error_setg(&local_err, "Cannot support VSMT mode %d"
2266fa98fbfcSSam Bobroff " because it must be >= threads/core (%d)",
2267fa98fbfcSSam Bobroff spapr->vsmt, smp_threads);
2268fa98fbfcSSam Bobroff goto out;
2269fa98fbfcSSam Bobroff }
2270fa98fbfcSSam Bobroff /* In this case, spapr->vsmt has been set by the command line */
2271fa98fbfcSSam Bobroff } else {
2272fa98fbfcSSam Bobroff /* Choose a VSMT mode that may be higher than necessary but is
2273fa98fbfcSSam Bobroff * likely to be compatible with hosts that don't have VSMT.
*/
2274fa98fbfcSSam Bobroff spapr->vsmt = MAX(kvm_smt, smp_threads);
2275fa98fbfcSSam Bobroff }
2276fa98fbfcSSam Bobroff
2277fa98fbfcSSam Bobroff /* KVM: If necessary, set the SMT mode: */
2278fa98fbfcSSam Bobroff if (kvm_enabled() && (spapr->vsmt != kvm_smt)) {
2279fa98fbfcSSam Bobroff ret = kvmppc_set_smt_threads(spapr->vsmt);
2280fa98fbfcSSam Bobroff if (ret) {
2281fa98fbfcSSam Bobroff error_setg(&local_err,
2282fa98fbfcSSam Bobroff "Failed to set KVM's VSMT mode to %d (errno %d)",
2283fa98fbfcSSam Bobroff spapr->vsmt, ret);
/* Only suggest a VSMT value when the user did not already pick one. */
2284fa98fbfcSSam Bobroff if (!vsmt_user) {
2285fa98fbfcSSam Bobroff error_append_hint(&local_err, "On PPC, a VM with %d threads/"
2286fa98fbfcSSam Bobroff "core on a host with %d threads/core requires "
2287fa98fbfcSSam Bobroff " the use of VSMT mode %d.\n",
2288fa98fbfcSSam Bobroff smp_threads, kvm_smt, spapr->vsmt);
2289fa98fbfcSSam Bobroff }
2290fa98fbfcSSam Bobroff kvmppc_hint_smt_possible(&local_err);
2291fa98fbfcSSam Bobroff goto out;
2292fa98fbfcSSam Bobroff }
2293fa98fbfcSSam Bobroff }
2294fa98fbfcSSam Bobroff /* else TCG: nothing to do currently */
2295fa98fbfcSSam Bobroff out:
2296fa98fbfcSSam Bobroff error_propagate(errp, local_err);
2297fa98fbfcSSam Bobroff }
2298fa98fbfcSSam Bobroff
229953018216SPaolo Bonzini /* pSeries LPAR / sPAPR hardware init */
2300bcb5ce08SDavid Gibson static void spapr_machine_init(MachineState *machine)
230153018216SPaolo Bonzini {
230228e02042SDavid Gibson sPAPRMachineState *spapr = SPAPR_MACHINE(machine);
2303224245bfSDavid Gibson sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);
23043ef96221SMarcel Apfelbaum const char *kernel_filename = machine->kernel_filename;
23053ef96221SMarcel Apfelbaum const char *initrd_filename = machine->initrd_filename;
230653018216SPaolo Bonzini PCIHostState *phb;
230753018216SPaolo Bonzini int i;
230853018216SPaolo Bonzini MemoryRegion *sysmem = get_system_memory();
230953018216SPaolo Bonzini MemoryRegion *ram = g_new(MemoryRegion, 1);
2310658fa66bSAlexey
Kardashevskiy MemoryRegion *rma_region; 2311658fa66bSAlexey Kardashevskiy void *rma = NULL; 231253018216SPaolo Bonzini hwaddr rma_alloc_size; 2313c86c1affSDaniel Henrique Barboza hwaddr node0_size = spapr_node0_size(machine); 2314b7d1f77aSBenjamin Herrenschmidt long load_limit, fw_size; 231553018216SPaolo Bonzini char *filename; 231630f4b05bSDavid Gibson Error *resize_hpt_err = NULL; 231753018216SPaolo Bonzini 231833face6bSDavid Gibson spapr_caps_validate(spapr, &error_fatal); 231933face6bSDavid Gibson 2320226419d6SMichael S. Tsirkin msi_nonbroken = true; 232153018216SPaolo Bonzini 232253018216SPaolo Bonzini QLIST_INIT(&spapr->phbs); 23230cffce56SDavid Gibson QTAILQ_INIT(&spapr->pending_dimm_unplugs); 232453018216SPaolo Bonzini 232530f4b05bSDavid Gibson /* Check HPT resizing availability */ 232630f4b05bSDavid Gibson kvmppc_check_papr_resize_hpt(&resize_hpt_err); 232730f4b05bSDavid Gibson if (spapr->resize_hpt == SPAPR_RESIZE_HPT_DEFAULT) { 232830f4b05bSDavid Gibson /* 232930f4b05bSDavid Gibson * If the user explicitly requested a mode we should either 233030f4b05bSDavid Gibson * supply it, or fail completely (which we do below). 
But if 233130f4b05bSDavid Gibson * it's not set explicitly, we reset our mode to something 233230f4b05bSDavid Gibson * that works 233330f4b05bSDavid Gibson */ 233430f4b05bSDavid Gibson if (resize_hpt_err) { 233530f4b05bSDavid Gibson spapr->resize_hpt = SPAPR_RESIZE_HPT_DISABLED; 233630f4b05bSDavid Gibson error_free(resize_hpt_err); 233730f4b05bSDavid Gibson resize_hpt_err = NULL; 233830f4b05bSDavid Gibson } else { 233930f4b05bSDavid Gibson spapr->resize_hpt = smc->resize_hpt_default; 234030f4b05bSDavid Gibson } 234130f4b05bSDavid Gibson } 234230f4b05bSDavid Gibson 234330f4b05bSDavid Gibson assert(spapr->resize_hpt != SPAPR_RESIZE_HPT_DEFAULT); 234430f4b05bSDavid Gibson 234530f4b05bSDavid Gibson if ((spapr->resize_hpt != SPAPR_RESIZE_HPT_DISABLED) && resize_hpt_err) { 234630f4b05bSDavid Gibson /* 234730f4b05bSDavid Gibson * User requested HPT resize, but this host can't supply it. Bail out 234830f4b05bSDavid Gibson */ 234930f4b05bSDavid Gibson error_report_err(resize_hpt_err); 235030f4b05bSDavid Gibson exit(1); 235130f4b05bSDavid Gibson } 235230f4b05bSDavid Gibson 235353018216SPaolo Bonzini /* Allocate RMA if necessary */ 2354658fa66bSAlexey Kardashevskiy rma_alloc_size = kvmppc_alloc_rma(&rma); 235553018216SPaolo Bonzini 235653018216SPaolo Bonzini if (rma_alloc_size == -1) { 2357730fce59SThomas Huth error_report("Unable to create RMA"); 235853018216SPaolo Bonzini exit(1); 235953018216SPaolo Bonzini } 236053018216SPaolo Bonzini 2361c4177479SAlexey Kardashevskiy if (rma_alloc_size && (rma_alloc_size < node0_size)) { 236253018216SPaolo Bonzini spapr->rma_size = rma_alloc_size; 236353018216SPaolo Bonzini } else { 2364c4177479SAlexey Kardashevskiy spapr->rma_size = node0_size; 236553018216SPaolo Bonzini 236653018216SPaolo Bonzini /* With KVM, we don't actually know whether KVM supports an 236753018216SPaolo Bonzini * unbounded RMA (PR KVM) or is limited by the hash table size 236853018216SPaolo Bonzini * (HV KVM using VRMA), so we always assume the latter 
236953018216SPaolo Bonzini * 237053018216SPaolo Bonzini * In that case, we also limit the initial allocations for RTAS 237153018216SPaolo Bonzini * etc... to 256M since we have no way to know what the VRMA size 237253018216SPaolo Bonzini * is going to be as it depends on the size of the hash table 237353018216SPaolo Bonzini * isn't determined yet. 237453018216SPaolo Bonzini */ 237553018216SPaolo Bonzini if (kvm_enabled()) { 237653018216SPaolo Bonzini spapr->vrma_adjust = 1; 237753018216SPaolo Bonzini spapr->rma_size = MIN(spapr->rma_size, 0x10000000); 237853018216SPaolo Bonzini } 2379912acdf4SBenjamin Herrenschmidt 2380912acdf4SBenjamin Herrenschmidt /* Actually we don't support unbounded RMA anymore since we 2381912acdf4SBenjamin Herrenschmidt * added proper emulation of HV mode. The max we can get is 2382912acdf4SBenjamin Herrenschmidt * 16G which also happens to be what we configure for PAPR 2383912acdf4SBenjamin Herrenschmidt * mode so make sure we don't do anything bigger than that 2384912acdf4SBenjamin Herrenschmidt */ 2385912acdf4SBenjamin Herrenschmidt spapr->rma_size = MIN(spapr->rma_size, 0x400000000ull); 238653018216SPaolo Bonzini } 238753018216SPaolo Bonzini 2388c4177479SAlexey Kardashevskiy if (spapr->rma_size > node0_size) { 2389d54e4d76SDavid Gibson error_report("Numa node 0 has to span the RMA (%#08"HWADDR_PRIx")", 2390c4177479SAlexey Kardashevskiy spapr->rma_size); 2391c4177479SAlexey Kardashevskiy exit(1); 2392c4177479SAlexey Kardashevskiy } 2393c4177479SAlexey Kardashevskiy 2394b7d1f77aSBenjamin Herrenschmidt /* Setup a load limit for the ramdisk leaving room for SLOF and FDT */ 2395b7d1f77aSBenjamin Herrenschmidt load_limit = MIN(spapr->rma_size, RTAS_MAX_ADDR) - FW_OVERHEAD; 239653018216SPaolo Bonzini 23977b565160SDavid Gibson /* Set up Interrupt Controller before we create the VCPUs */ 239871cd4dacSCédric Le Goater xics_system_init(machine, XICS_IRQS_SPAPR, &error_fatal); 23997b565160SDavid Gibson 2400dc1b5eeeSGreg Kurz /* Set up containers 
for ibm,client-architecture-support negotiated options 2401dc1b5eeeSGreg Kurz */ 2402facdb8b6SMichael Roth spapr->ov5 = spapr_ovec_new(); 2403facdb8b6SMichael Roth spapr->ov5_cas = spapr_ovec_new(); 2404facdb8b6SMichael Roth 2405224245bfSDavid Gibson if (smc->dr_lmb_enabled) { 2406facdb8b6SMichael Roth spapr_ovec_set(spapr->ov5, OV5_DRCONF_MEMORY); 24077c150d6fSDavid Gibson spapr_validate_node_memory(machine, &error_fatal); 2408224245bfSDavid Gibson } 2409224245bfSDavid Gibson 2410417ece33SMichael Roth spapr_ovec_set(spapr->ov5, OV5_FORM1_AFFINITY); 2411545d6e2bSSuraj Jitindar Singh if (!kvm_enabled() || kvmppc_has_cap_mmu_radix()) { 2412545d6e2bSSuraj Jitindar Singh /* KVM and TCG always allow GTSE with radix... */ 24139fb4541fSSam Bobroff spapr_ovec_set(spapr->ov5, OV5_MMU_RADIX_GTSE); 24149fb4541fSSam Bobroff } 24159fb4541fSSam Bobroff /* ... but not with hash (currently). */ 2416417ece33SMichael Roth 2417ffbb1705SMichael Roth /* advertise support for dedicated HP event source to guests */ 2418ffbb1705SMichael Roth if (spapr->use_hotplug_event_source) { 2419ffbb1705SMichael Roth spapr_ovec_set(spapr->ov5, OV5_HP_EVT); 2420ffbb1705SMichael Roth } 2421ffbb1705SMichael Roth 24222772cf6bSDavid Gibson /* advertise support for HPT resizing */ 24232772cf6bSDavid Gibson if (spapr->resize_hpt != SPAPR_RESIZE_HPT_DISABLED) { 24242772cf6bSDavid Gibson spapr_ovec_set(spapr->ov5, OV5_HPT_RESIZE); 24252772cf6bSDavid Gibson } 24262772cf6bSDavid Gibson 242753018216SPaolo Bonzini /* init CPUs */ 2428fa98fbfcSSam Bobroff spapr_set_vsmt_mode(spapr, &error_fatal); 2429fa98fbfcSSam Bobroff 24300c86d0fdSDavid Gibson spapr_init_cpus(spapr); 243153018216SPaolo Bonzini 2432026bfd89SDavid Gibson if (kvm_enabled()) { 2433026bfd89SDavid Gibson /* Enable H_LOGICAL_CI_* so SLOF can talk to in-kernel devices */ 2434026bfd89SDavid Gibson kvmppc_enable_logical_ci_hcalls(); 2435ef9971ddSAlexey Kardashevskiy kvmppc_enable_set_mode_hcall(); 24365145ad4fSNathan Whitehorn 24375145ad4fSNathan 
        /* H_CLEAR_MOD/_REF are mandatory in PAPR, but off by default */
        kvmppc_enable_clear_ref_mod_hcalls();
    }

    /* allocate RAM */
    memory_region_allocate_system_memory(ram, NULL, "ppc_spapr.ram",
                                         machine->ram_size);
    memory_region_add_subregion(sysmem, 0, ram);

    /* NOTE(review): rma / rma_alloc_size are initialized earlier in this
     * function, outside this fragment. When a host-allocated RMA buffer
     * exists, alias it over the start of system memory. */
    if (rma_alloc_size && rma) {
        rma_region = g_new(MemoryRegion, 1);
        memory_region_init_ram_ptr(rma_region, NULL, "ppc_spapr.rma",
                                   rma_alloc_size, rma);
        vmstate_register_ram_global(rma_region);
        memory_region_add_subregion(sysmem, 0, rma_region);
    }

    /* initialize hotplug memory address space */
    if (machine->ram_size < machine->maxram_size) {
        ram_addr_t hotplug_mem_size = machine->maxram_size - machine->ram_size;
        /*
         * Limit the number of hotpluggable memory slots to half the number
         * slots that KVM supports, leaving the other half for PCI and other
         * devices. However ensure that number of slots doesn't drop below 32.
         */
        int max_memslots = kvm_enabled() ?
                           kvm_get_max_memslots() / 2 : SPAPR_MAX_RAM_SLOTS;

        if (max_memslots < SPAPR_MAX_RAM_SLOTS) {
            max_memslots = SPAPR_MAX_RAM_SLOTS;
        }
        if (machine->ram_slots > max_memslots) {
            error_report("Specified number of memory slots %"
                         PRIu64" exceeds max supported %d",
                         machine->ram_slots, max_memslots);
            exit(1);
        }

        /* Hotpluggable memory lives in its own region above boot RAM */
        spapr->hotplug_memory.base = ROUND_UP(machine->ram_size,
                                              SPAPR_HOTPLUG_MEM_ALIGN);
        memory_region_init(&spapr->hotplug_memory.mr, OBJECT(spapr),
                           "hotplug-memory", hotplug_mem_size);
        memory_region_add_subregion(sysmem, spapr->hotplug_memory.base,
                                    &spapr->hotplug_memory.mr);
    }

    /* One DR connector per logical memory block, when DR-LMB is enabled */
    if (smc->dr_lmb_enabled) {
        spapr_create_lmb_dr_connectors(spapr);
    }

    /* Read the RTAS firmware blob into spapr->rtas_blob */
    filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, "spapr-rtas.bin");
    if (!filename) {
        error_report("Could not find LPAR rtas '%s'", "spapr-rtas.bin");
        exit(1);
    }
    spapr->rtas_size = get_image_size(filename);
    if (spapr->rtas_size < 0) {
        error_report("Could not get size of LPAR rtas '%s'", filename);
        exit(1);
    }
    spapr->rtas_blob = g_malloc(spapr->rtas_size);
    if (load_image_size(filename,
                        spapr->rtas_blob, spapr->rtas_size) < 0) {
        error_report("Could not load LPAR rtas '%s'", filename);
        exit(1);
    }
    if (spapr->rtas_size > RTAS_MAX_SIZE) {
        error_report("RTAS too big ! 0x%zx bytes (max is 0x%x)",
                     (size_t)spapr->rtas_size, RTAS_MAX_SIZE);
        exit(1);
    }
    g_free(filename);

    /* Set up RTAS event infrastructure */
    spapr_events_init(spapr);

    /* Set up the RTC RTAS interfaces */
    spapr_rtc_create(spapr);

    /* Set up VIO bus */
    spapr->vio_bus = spapr_vio_bus_init();

    /* One VIO vty per configured serial port */
    for (i = 0; i < MAX_SERIAL_PORTS; i++) {
        if (serial_hds[i]) {
            spapr_vty_create(spapr->vio_bus, serial_hds[i]);
        }
    }

    /* We always have at least the nvram device on VIO */
    spapr_create_nvram(spapr);

    /* Set up PCI */
    spapr_pci_rtas_init();

    phb = spapr_create_phb(spapr, 0);

    /* NICs default to the paravirtual "ibmveth" model on the VIO bus;
     * any other model is instantiated as a PCI NIC instead. */
    for (i = 0; i < nb_nics; i++) {
        NICInfo *nd = &nd_table[i];

        if (!nd->model) {
            nd->model = g_strdup("ibmveth");
        }

        if (strcmp(nd->model, "ibmveth") == 0) {
            spapr_vlan_create(spapr->vio_bus, nd);
        } else {
            pci_nic_init_nofail(&nd_table[i], phb->bus, nd->model, NULL);
        }
    }

    /* One paravirtual SCSI controller per populated SCSI bus */
    for (i = 0; i <= drive_get_max_bus(IF_SCSI); i++) {
        spapr_vscsi_create(spapr->vio_bus);
    }

    /* Graphics */
    if (spapr_vga_init(phb->bus, &error_fatal)) {
        spapr->has_graphics = true;
        machine->usb |= defaults_enabled() && !machine->usb_disabled;
    }

    if (machine->usb) {
        if (smc->use_ohci_by_default) {
            pci_create_simple(phb->bus, -1, "pci-ohci");
        } else {
            pci_create_simple(phb->bus, -1, "nec-usb-xhci");
        }

        /* With a display attached, also provide USB input devices */
        if (spapr->has_graphics) {
            USBBus *usb_bus = usb_bus_find(-1);

            usb_create_simple(usb_bus, "usb-kbd");
            usb_create_simple(usb_bus, "usb-mouse");
        }
    }

    if (spapr->rma_size < (MIN_RMA_SLOF << 20)) {
        error_report(
            "pSeries SLOF firmware requires >= %ldM guest RMA (Real Mode Area memory)",
            MIN_RMA_SLOF);
        exit(1);
    }

    if (kernel_filename) {
        uint64_t lowaddr = 0;

        /* Try big-endian first; on wrong-endianness retry as little-endian
         * and remember the result in spapr->kernel_le. */
        spapr->kernel_size = load_elf(kernel_filename, translate_kernel_address,
                                      NULL, NULL, &lowaddr, NULL, 1,
                                      PPC_ELF_MACHINE, 0, 0);
        if (spapr->kernel_size == ELF_LOAD_WRONG_ENDIAN) {
            spapr->kernel_size = load_elf(kernel_filename,
                                          translate_kernel_address, NULL, NULL,
                                          &lowaddr, NULL, 0, PPC_ELF_MACHINE,
                                          0, 0);
            spapr->kernel_le = spapr->kernel_size > 0;
        }
        if (spapr->kernel_size < 0) {
            error_report("error loading %s: %s", kernel_filename,
                         load_elf_strerror(spapr->kernel_size));
            exit(1);
        }

        /* load initrd */
        if (initrd_filename) {
            /* Try to locate the initrd in the gap between the kernel
             * and the firmware. Add a bit of space just in case
             */
            spapr->initrd_base = (KERNEL_LOAD_ADDR + spapr->kernel_size
                                  + 0x1ffff) & ~0xffff;
            spapr->initrd_size = load_image_targphys(initrd_filename,
                                                     spapr->initrd_base,
                                                     load_limit
                                                     - spapr->initrd_base);
            if (spapr->initrd_size < 0) {
                error_report("could not load initial ram disk '%s'",
                             initrd_filename);
                exit(1);
            }
        }
    }

    /* Load the firmware (SLOF by default) at guest address 0 */
    if (bios_name == NULL) {
        bios_name = FW_FILE_NAME;
    }
    filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
    if (!filename) {
        error_report("Could not find LPAR firmware '%s'", bios_name);
        exit(1);
    }
    fw_size = load_image_targphys(filename, 0, FW_MAX_SIZE);
    if (fw_size <= 0) {
        error_report("Could not load LPAR firmware '%s'", filename);
        exit(1);
    }
    g_free(filename);

    /* FIXME: Should register things through the MachineState's qdev
     * interface, this is a legacy from the sPAPREnvironment structure
     * which predated MachineState but had a similar function */
    vmstate_register(NULL, 0, &vmstate_spapr, spapr);
    register_savevm_live(NULL, "spapr/htab", -1, 1,
                         &savevm_htab_handlers, spapr);

    /* Re-run boot-device setup when the monitor changes the boot order */
    qemu_register_boot_set(spapr_boot_set, spapr);

    if (kvm_enabled()) {
        /* to stop and start vmclock */
        qemu_add_vm_change_state_handler(cpu_ppc_clock_vm_state_change,
                                         &spapr->tb);

        kvmppc_spapr_enable_inkernel_multitce();
    }
}

/*
 * Translate the machine's "kvm-type" property string into the numeric
 * type passed to KVM: 0 = unspecified (let KVM pick), 1 = "HV", 2 = "PR".
 * Any other value is a fatal configuration error.
 */
static int spapr_kvm_type(const char *vm_type)
{
    if (!vm_type) {
        return 0;
    }

    if (!strcmp(vm_type, "HV")) {
        return 1;
    }

    if (!strcmp(vm_type, "PR")) {
        return 2;
    }

    error_report("Unknown kvm-type specified '%s'", vm_type);
    exit(1);
}

/*
 * Implementation of an interface to adjust firmware path
 * for the bootindex property handling.
 */
/*
 * FWPathProvider callback: build the OpenFirmware device-tree path that
 * SLOF uses for a bootable device, so that "bootindex" ordering matches
 * what the firmware actually sees. Returns a newly allocated string
 * (caller frees) or NULL to fall back to the generic path.
 */
static char *spapr_get_fw_dev_path(FWPathProvider *p, BusState *bus,
                                   DeviceState *dev)
{
#define CAST(type, obj, name) \
    ((type *)object_dynamic_cast(OBJECT(obj), (name)))
    SCSIDevice *d = CAST(SCSIDevice, dev, TYPE_SCSI_DEVICE);
    sPAPRPHBState *phb = CAST(sPAPRPHBState, dev, TYPE_SPAPR_PCI_HOST_BRIDGE);
    VHostSCSICommon *vsc = CAST(VHostSCSICommon, dev, TYPE_VHOST_SCSI_COMMON);

    if (d) {
        void *spapr = CAST(void, bus->parent, "spapr-vscsi");
        VirtIOSCSI *virtio = CAST(VirtIOSCSI, bus->parent, TYPE_VIRTIO_SCSI);
        USBDevice *usb = CAST(USBDevice, bus->parent, TYPE_USB_DEVICE);

        if (spapr) {
            /*
             * Replace "channel@0/disk@0,0" with "disk@8000000000000000":
             * We use SRP luns of the form 8000 | (bus << 8) | (id << 5) | lun
             * in the top 16 bits of the 64-bit LUN
             */
            unsigned id = 0x8000 | (d->id << 8) | d->lun;
            return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
                                   (uint64_t)id << 48);
        } else if (virtio) {
            /*
             * We use SRP luns of the form 01000000 | (target << 8) | lun
             * in the top 32 bits of the 64-bit LUN
             * Note: the quote above is from SLOF and it is wrong,
             * the actual binding is:
             * swap 0100 or 10 << or 20 << ( target lun-id -- srplun )
             */
            unsigned id = 0x1000000 | (d->id << 16) | d->lun;
            if (d->lun >= 256) {
                /* Use the LUN "flat space addressing method" */
                id |= 0x4000;
            }
            return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
                                   (uint64_t)id << 32);
        } else if (usb) {
            /*
             * We use SRP luns of the form 01000000 | (usb-port << 16) | lun
             * in the top 32 bits of the 64-bit LUN
             */
            unsigned usb_port = atoi(usb->port->path);
            unsigned id = 0x1000000 | (usb_port << 16) | d->lun;
            return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
                                   (uint64_t)id << 32);
        }
    }

    /*
     * SLOF probes the USB devices, and if it recognizes that the device is a
     * storage device, it changes its name to "storage" instead of "usb-host",
     * and additionally adds a child node for the SCSI LUN, so the correct
     * boot path in SLOF is something like .../storage@1/disk@xxx" instead.
     */
    if (strcmp("usb-host", qdev_fw_name(dev)) == 0) {
        USBDevice *usbdev = CAST(USBDevice, dev, TYPE_USB_DEVICE);
        if (usb_host_dev_is_scsi_storage(usbdev)) {
            return g_strdup_printf("storage@%s/disk", usbdev->port->path);
        }
    }

    if (phb) {
        /* Replace "pci" with "pci@800000020000000" */
        return g_strdup_printf("pci@%"PRIX64, phb->buid);
    }

    if (vsc) {
        /* Same logic as virtio above */
        unsigned id = 0x1000000 | (vsc->target << 16) | vsc->lun;
        return g_strdup_printf("disk@%"PRIX64, (uint64_t)id << 32);
    }

    if (g_str_equal("pci-bridge", qdev_fw_name(dev))) {
        /* SLOF uses "pci" instead of "pci-bridge" for PCI bridges */
        PCIDevice *pcidev = CAST(PCIDevice, dev, TYPE_PCI_DEVICE);
        return g_strdup_printf("pci@%x", PCI_SLOT(pcidev->devfn));
    }

    return NULL;
}

/* QOM getter for the "kvm-type" machine property (returns a copy). */
static char *spapr_get_kvm_type(Object *obj, Error **errp)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(obj);

    return g_strdup(spapr->kvm_type);
}

/* QOM setter for the "kvm-type" machine property. */
static void spapr_set_kvm_type(Object *obj, const char *value, Error **errp)
276223825581SEduardo Habkost { 276328e02042SDavid Gibson sPAPRMachineState *spapr = SPAPR_MACHINE(obj); 276423825581SEduardo Habkost 276528e02042SDavid Gibson g_free(spapr->kvm_type); 276628e02042SDavid Gibson spapr->kvm_type = g_strdup(value); 276723825581SEduardo Habkost } 276823825581SEduardo Habkost 2769f6229214SMichael Roth static bool spapr_get_modern_hotplug_events(Object *obj, Error **errp) 2770f6229214SMichael Roth { 2771f6229214SMichael Roth sPAPRMachineState *spapr = SPAPR_MACHINE(obj); 2772f6229214SMichael Roth 2773f6229214SMichael Roth return spapr->use_hotplug_event_source; 2774f6229214SMichael Roth } 2775f6229214SMichael Roth 2776f6229214SMichael Roth static void spapr_set_modern_hotplug_events(Object *obj, bool value, 2777f6229214SMichael Roth Error **errp) 2778f6229214SMichael Roth { 2779f6229214SMichael Roth sPAPRMachineState *spapr = SPAPR_MACHINE(obj); 2780f6229214SMichael Roth 2781f6229214SMichael Roth spapr->use_hotplug_event_source = value; 2782f6229214SMichael Roth } 2783f6229214SMichael Roth 278430f4b05bSDavid Gibson static char *spapr_get_resize_hpt(Object *obj, Error **errp) 278530f4b05bSDavid Gibson { 278630f4b05bSDavid Gibson sPAPRMachineState *spapr = SPAPR_MACHINE(obj); 278730f4b05bSDavid Gibson 278830f4b05bSDavid Gibson switch (spapr->resize_hpt) { 278930f4b05bSDavid Gibson case SPAPR_RESIZE_HPT_DEFAULT: 279030f4b05bSDavid Gibson return g_strdup("default"); 279130f4b05bSDavid Gibson case SPAPR_RESIZE_HPT_DISABLED: 279230f4b05bSDavid Gibson return g_strdup("disabled"); 279330f4b05bSDavid Gibson case SPAPR_RESIZE_HPT_ENABLED: 279430f4b05bSDavid Gibson return g_strdup("enabled"); 279530f4b05bSDavid Gibson case SPAPR_RESIZE_HPT_REQUIRED: 279630f4b05bSDavid Gibson return g_strdup("required"); 279730f4b05bSDavid Gibson } 279830f4b05bSDavid Gibson g_assert_not_reached(); 279930f4b05bSDavid Gibson } 280030f4b05bSDavid Gibson 280130f4b05bSDavid Gibson static void spapr_set_resize_hpt(Object *obj, const char *value, Error **errp) 
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(obj);

    /* QOM setter for "resize-hpt": parse the string into the enum. */
    if (strcmp(value, "default") == 0) {
        spapr->resize_hpt = SPAPR_RESIZE_HPT_DEFAULT;
    } else if (strcmp(value, "disabled") == 0) {
        spapr->resize_hpt = SPAPR_RESIZE_HPT_DISABLED;
    } else if (strcmp(value, "enabled") == 0) {
        spapr->resize_hpt = SPAPR_RESIZE_HPT_ENABLED;
    } else if (strcmp(value, "required") == 0) {
        spapr->resize_hpt = SPAPR_RESIZE_HPT_REQUIRED;
    } else {
        error_setg(errp, "Bad value for \"resize-hpt\" property");
    }
}

/* Visitor-based QOM getter for "vsmt"; opaque points at spapr->vsmt. */
static void spapr_get_vsmt(Object *obj, Visitor *v, const char *name,
                           void *opaque, Error **errp)
{
    visit_type_uint32(v, name, (uint32_t *)opaque, errp);
}

/* Visitor-based QOM setter for "vsmt"; opaque points at spapr->vsmt. */
static void spapr_set_vsmt(Object *obj, Visitor *v, const char *name,
                           void *opaque, Error **errp)
{
    visit_type_uint32(v, name, (uint32_t *)opaque, errp);
}

/* Per-instance init: set defaults and register machine properties. */
static void spapr_instance_init(Object *obj)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(obj);

    spapr->htab_fd = -1;
    spapr->use_hotplug_event_source = true;
    object_property_add_str(obj, "kvm-type",
                            spapr_get_kvm_type, spapr_set_kvm_type, NULL);
    object_property_set_description(obj, "kvm-type",
                                    "Specifies the KVM virtualization mode (HV, PR)",
                                    NULL);
    object_property_add_bool(obj, "modern-hotplug-events",
                             spapr_get_modern_hotplug_events,
                             spapr_set_modern_hotplug_events,
                             NULL);
    object_property_set_description(obj, "modern-hotplug-events",
                                    "Use dedicated hotplug event mechanism in"
                                    " place of standard EPOW events when possible"
                                    " (required for memory hot-unplug support)",
                                    NULL);

    ppc_compat_add_property(obj, "max-cpu-compat", &spapr->max_compat_pvr,
                            "Maximum permitted CPU compatibility mode",
                            &error_fatal);

    object_property_add_str(obj, "resize-hpt",
                            spapr_get_resize_hpt, spapr_set_resize_hpt, NULL);
    object_property_set_description(obj, "resize-hpt",
                                    "Resizing of the Hash Page Table (enabled, disabled, required)",
                                    NULL);
    object_property_add(obj, "vsmt", "uint32", spapr_get_vsmt,
                        spapr_set_vsmt, NULL, &spapr->vsmt, &error_abort);
    object_property_set_description(obj, "vsmt",
                                    "Virtual SMT: KVM behaves as if this were"
                                    " the host's SMT mode", &error_abort);
}

/* Instance finalizer: release memory owned by the machine state. */
static void spapr_machine_finalizefn(Object *obj)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(obj);

    g_free(spapr->kvm_type);
}

/* Per-CPU worker: sync register state then raise a system reset. */
void spapr_do_system_reset_on_cpu(CPUState *cs, run_on_cpu_data arg)
{
    cpu_synchronize_state(cs);
    ppc_cpu_do_system_reset(cs);
}

/* NMI interface: broadcast a system reset to every vCPU. */
static void spapr_nmi(NMIState *n, int cpu_index, Error **errp)
{
    CPUState *cs;

    CPU_FOREACH(cs) {
        async_run_on_cpu(cs, spapr_do_system_reset_on_cpu, RUN_ON_CPU_NULL);
    }
}

/*
 * Attach all LMB DR connectors covering [addr_start, addr_start + size)
 * to @dev, building a memory device-tree fragment for each. On failure,
 * already-attached connectors are rolled back and the error propagated.
 * For hotplugged devices, notify the guest afterwards (via the dedicated
 * hotplug event source when @dedicated_hp_event_source is set).
 */
static void spapr_add_lmbs(DeviceState *dev, uint64_t addr_start, uint64_t size,
                           uint32_t node, bool dedicated_hp_event_source,
                           Error **errp)
{
    sPAPRDRConnector *drc;
    uint32_t nr_lmbs = size/SPAPR_MEMORY_BLOCK_SIZE;
    int i, fdt_offset, fdt_size;
    void *fdt;
    uint64_t addr = addr_start;
    bool hotplugged = spapr_drc_hotplugged(dev);
    Error *local_err = NULL;

    for (i = 0; i < nr_lmbs; i++) {
        drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
                              addr / SPAPR_MEMORY_BLOCK_SIZE);
        g_assert(drc);

        fdt = create_device_tree(&fdt_size);
        fdt_offset = spapr_populate_memory_node(fdt, node, addr,
                                                SPAPR_MEMORY_BLOCK_SIZE);

        spapr_drc_attach(drc, dev, fdt, fdt_offset, &local_err);
        if (local_err) {
            /* Roll back: detach every connector attached so far */
            while (addr > addr_start) {
                addr -= SPAPR_MEMORY_BLOCK_SIZE;
                drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
                                      addr / SPAPR_MEMORY_BLOCK_SIZE);
                spapr_drc_detach(drc);
            }
            g_free(fdt);
            error_propagate(errp, local_err);
            return;
        }
        if (!hotplugged) {
            spapr_drc_reset(drc);
        }
        addr += SPAPR_MEMORY_BLOCK_SIZE;
    }
    /* send hotplug notification to the
     * guest only in case of hotplugged memory
     */
    if (hotplugged) {
        if (dedicated_hp_event_source) {
            drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
                                  addr_start / SPAPR_MEMORY_BLOCK_SIZE);
            spapr_hotplug_req_add_by_count_indexed(SPAPR_DR_CONNECTOR_TYPE_LMB,
                                                   nr_lmbs,
                                                   spapr_drc_index(drc));
        } else {
            spapr_hotplug_req_add_by_count(SPAPR_DR_CONNECTOR_TYPE_LMB,
                                           nr_lmbs);
        }
    }
}

/*
 * Hotplug handler "plug" callback for pc-dimm devices: map the DIMM into
 * the hotplug memory space, then attach its LMB DR connectors. Unmaps the
 * DIMM again if the LMB attach fails.
 */
static void spapr_memory_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
                              uint32_t node, Error **errp)
{
    Error *local_err = NULL;
    sPAPRMachineState *ms =
                              SPAPR_MACHINE(hotplug_dev);
    PCDIMMDevice *dimm = PC_DIMM(dev);
    PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);
    MemoryRegion *mr;
    uint64_t align, size, addr;

    mr = ddc->get_memory_region(dimm, &local_err);
    if (local_err) {
        goto out;
    }
    align = memory_region_get_alignment(mr);
    size = memory_region_size(mr);

    /* Map the DIMM's memory region into the hotplug memory space */
    pc_dimm_memory_plug(dev, &ms->hotplug_memory, mr, align, &local_err);
    if (local_err) {
        goto out;
    }

    /* The address assigned by pc_dimm_memory_plug above */
    addr = object_property_get_uint(OBJECT(dimm),
                                    PC_DIMM_ADDR_PROP, &local_err);
    if (local_err) {
        goto out_unplug;
    }

    spapr_add_lmbs(dev, addr, size, node,
                   spapr_ovec_test(ms->ov5_cas, OV5_HP_EVT),
                   &local_err);
    if (local_err) {
        goto out_unplug;
    }

    return;

out_unplug:
    pc_dimm_memory_unplug(dev, &ms->hotplug_memory, mr);
out:
    error_propagate(errp, local_err);
}

/*
 * Hotplug handler "pre-plug" callback: validate a pc-dimm before it is
 * realized (size must be LMB-aligned, backend page size acceptable).
 */
static void spapr_memory_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
                                  Error **errp)
{
    PCDIMMDevice *dimm = PC_DIMM(dev);
    PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);
    MemoryRegion *mr;
    uint64_t size;
    char *mem_dev;

    mr = ddc->get_memory_region(dimm, errp);
    if (!mr) {
        return;
    }
    size = memory_region_size(mr);

    /* Memory is managed in whole LMBs; reject unaligned DIMM sizes */
    if (size % SPAPR_MEMORY_BLOCK_SIZE) {
        error_setg(errp, "Hotplugged memory size must be a multiple of "
                   "%lld MB", SPAPR_MEMORY_BLOCK_SIZE / M_BYTE);
        return;
    }

    mem_dev = object_property_get_str(OBJECT(dimm), PC_DIMM_MEMDEV_PROP, NULL);
    if (mem_dev && !kvmppc_is_mem_backend_page_size_ok(mem_dev)) {
        error_setg(errp, "Memory backend has bad page size. "
                   "Use 'memory-backend-file' with correct mem-path.");
        goto out;
    }

out:
    g_free(mem_dev);
}

/* Tracks one in-flight DIMM unplug: how many LMBs the guest still owns. */
struct sPAPRDIMMState {
    PCDIMMDevice *dimm;
    uint32_t nr_lmbs;
    QTAILQ_ENTRY(sPAPRDIMMState) next;
};

/* Look up the pending-unplug record for @dimm; NULL if none exists. */
static sPAPRDIMMState *spapr_pending_dimm_unplugs_find(sPAPRMachineState *s,
                                                       PCDIMMDevice *dimm)
{
    sPAPRDIMMState *dimm_state = NULL;

    QTAILQ_FOREACH(dimm_state, &s->pending_dimm_unplugs, next) {
        if (dimm_state->dimm == dimm) {
            break;
        }
    }
    return dimm_state;
}

/*
 * Record a pending unplug of @dimm covering @nr_lmbs LMBs, or return the
 * existing record if one is already queued.
 */
static sPAPRDIMMState *spapr_pending_dimm_unplugs_add(sPAPRMachineState *spapr,
                                                      uint32_t nr_lmbs,
                                                      PCDIMMDevice *dimm)
{
    sPAPRDIMMState *ds = NULL;

    /*
     * If this request is for a DIMM whose removal had failed earlier
     * (due to guest's refusal to remove the LMBs), we would have this
     * dimm already in the pending_dimm_unplugs list. In that
     * case don't add again.
     */
    ds = spapr_pending_dimm_unplugs_find(spapr, dimm);
    if (!ds) {
        ds = g_malloc0(sizeof(sPAPRDIMMState));
        ds->nr_lmbs = nr_lmbs;
        ds->dimm = dimm;
        QTAILQ_INSERT_HEAD(&spapr->pending_dimm_unplugs, ds, next);
    }
    return ds;
}

/* Drop and free a pending-unplug record once the unplug completes. */
static void spapr_pending_dimm_unplugs_remove(sPAPRMachineState *spapr,
                                              sPAPRDIMMState *dimm_state)
{
    QTAILQ_REMOVE(&spapr->pending_dimm_unplugs, dimm_state, next);
    g_free(dimm_state);
}

/*
 * Rebuild the pending-unplug state for @dimm by counting how many of its
 * LMB DR connectors still have a device attached (used when the original
 * record was lost, e.g. across migration).
 */
static sPAPRDIMMState *spapr_recover_pending_dimm_state(sPAPRMachineState *ms,
                                                        PCDIMMDevice *dimm)
{
    sPAPRDRConnector *drc;
    PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);
    MemoryRegion *mr = ddc->get_memory_region(dimm, &error_abort);
    uint64_t size = memory_region_size(mr);
    uint32_t nr_lmbs = size / SPAPR_MEMORY_BLOCK_SIZE;
    uint32_t avail_lmbs = 0;
    uint64_t addr_start, addr;
    int i;

    addr_start = object_property_get_int(OBJECT(dimm), PC_DIMM_ADDR_PROP,
                                         &error_abort);

    addr = addr_start;
    for (i
         = 0; i < nr_lmbs; i++) {
        drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
                              addr / SPAPR_MEMORY_BLOCK_SIZE);
        g_assert(drc);
        /* A connector with a device attached is still held by the guest */
        if (drc->dev) {
            avail_lmbs++;
        }
        addr += SPAPR_MEMORY_BLOCK_SIZE;
    }

    return spapr_pending_dimm_unplugs_add(ms, avail_lmbs, dimm);
}

/* Callback to be called during DRC release. */
void spapr_lmb_release(DeviceState *dev)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_hotplug_handler(dev));
    PCDIMMDevice *dimm = PC_DIMM(dev);
    PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);
    MemoryRegion *mr = ddc->get_memory_region(dimm, &error_abort);
    sPAPRDIMMState *ds = spapr_pending_dimm_unplugs_find(spapr, PC_DIMM(dev));

    /* This information will get lost if a migration occurs
     * during the unplug process. In this case recover it.
*/ 310716ee9980SDaniel Henrique Barboza if (ds == NULL) { 310816ee9980SDaniel Henrique Barboza ds = spapr_recover_pending_dimm_state(spapr, PC_DIMM(dev)); 31098d5981c4SBharata B Rao g_assert(ds); 3110454b580aSDavid Gibson /* The DRC being examined by the caller at least must be counted */ 3111454b580aSDavid Gibson g_assert(ds->nr_lmbs); 311216ee9980SDaniel Henrique Barboza } 3113454b580aSDavid Gibson 3114454b580aSDavid Gibson if (--ds->nr_lmbs) { 3115cf632463SBharata B Rao return; 3116cf632463SBharata B Rao } 3117cf632463SBharata B Rao 3118cf632463SBharata B Rao /* 3119cf632463SBharata B Rao * Now that all the LMBs have been removed by the guest, call the 3120cf632463SBharata B Rao * pc-dimm unplug handler to cleanup up the pc-dimm device. 3121cf632463SBharata B Rao */ 3122765d1bddSDavid Gibson pc_dimm_memory_unplug(dev, &spapr->hotplug_memory, mr); 3123cf632463SBharata B Rao object_unparent(OBJECT(dev)); 31242a129767SDaniel Henrique Barboza spapr_pending_dimm_unplugs_remove(spapr, ds); 3125cf632463SBharata B Rao } 3126cf632463SBharata B Rao 3127cf632463SBharata B Rao static void spapr_memory_unplug_request(HotplugHandler *hotplug_dev, 3128cf632463SBharata B Rao DeviceState *dev, Error **errp) 3129cf632463SBharata B Rao { 31300cffce56SDavid Gibson sPAPRMachineState *spapr = SPAPR_MACHINE(hotplug_dev); 3131cf632463SBharata B Rao Error *local_err = NULL; 3132cf632463SBharata B Rao PCDIMMDevice *dimm = PC_DIMM(dev); 3133cf632463SBharata B Rao PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm); 313404790978SThomas Huth MemoryRegion *mr; 313504790978SThomas Huth uint32_t nr_lmbs; 313604790978SThomas Huth uint64_t size, addr_start, addr; 31370cffce56SDavid Gibson int i; 31380cffce56SDavid Gibson sPAPRDRConnector *drc; 313904790978SThomas Huth 314004790978SThomas Huth mr = ddc->get_memory_region(dimm, &local_err); 314104790978SThomas Huth if (local_err) { 314204790978SThomas Huth goto out; 314304790978SThomas Huth } 314404790978SThomas Huth size = memory_region_size(mr); 
314504790978SThomas Huth nr_lmbs = size / SPAPR_MEMORY_BLOCK_SIZE; 314604790978SThomas Huth 31479ed442b8SMarc-André Lureau addr_start = object_property_get_uint(OBJECT(dimm), PC_DIMM_ADDR_PROP, 31480cffce56SDavid Gibson &local_err); 3149cf632463SBharata B Rao if (local_err) { 3150cf632463SBharata B Rao goto out; 3151cf632463SBharata B Rao } 3152cf632463SBharata B Rao 31532a129767SDaniel Henrique Barboza /* 31542a129767SDaniel Henrique Barboza * An existing pending dimm state for this DIMM means that there is an 31552a129767SDaniel Henrique Barboza * unplug operation in progress, waiting for the spapr_lmb_release 31562a129767SDaniel Henrique Barboza * callback to complete the job (BQL can't cover that far). In this case, 31572a129767SDaniel Henrique Barboza * bail out to avoid detaching DRCs that were already released. 31582a129767SDaniel Henrique Barboza */ 31592a129767SDaniel Henrique Barboza if (spapr_pending_dimm_unplugs_find(spapr, dimm)) { 31602a129767SDaniel Henrique Barboza error_setg(&local_err, 31612a129767SDaniel Henrique Barboza "Memory unplug already in progress for device %s", 31622a129767SDaniel Henrique Barboza dev->id); 31632a129767SDaniel Henrique Barboza goto out; 31642a129767SDaniel Henrique Barboza } 31652a129767SDaniel Henrique Barboza 31668d5981c4SBharata B Rao spapr_pending_dimm_unplugs_add(spapr, nr_lmbs, dimm); 31670cffce56SDavid Gibson 31680cffce56SDavid Gibson addr = addr_start; 31690cffce56SDavid Gibson for (i = 0; i < nr_lmbs; i++) { 3170fbf55397SDavid Gibson drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, 31710cffce56SDavid Gibson addr / SPAPR_MEMORY_BLOCK_SIZE); 31720cffce56SDavid Gibson g_assert(drc); 31730cffce56SDavid Gibson 3174a8dc47fdSDavid Gibson spapr_drc_detach(drc); 31750cffce56SDavid Gibson addr += SPAPR_MEMORY_BLOCK_SIZE; 31760cffce56SDavid Gibson } 31770cffce56SDavid Gibson 3178fbf55397SDavid Gibson drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, 31790cffce56SDavid Gibson addr_start / SPAPR_MEMORY_BLOCK_SIZE); 31800cffce56SDavid 
Gibson spapr_hotplug_req_remove_by_count_indexed(SPAPR_DR_CONNECTOR_TYPE_LMB, 31810b55aa91SDavid Gibson nr_lmbs, spapr_drc_index(drc)); 3182cf632463SBharata B Rao out: 3183cf632463SBharata B Rao error_propagate(errp, local_err); 3184cf632463SBharata B Rao } 3185cf632463SBharata B Rao 318604d0ffbdSGreg Kurz static void *spapr_populate_hotplug_cpu_dt(CPUState *cs, int *fdt_offset, 3187af81cf32SBharata B Rao sPAPRMachineState *spapr) 3188af81cf32SBharata B Rao { 3189af81cf32SBharata B Rao PowerPCCPU *cpu = POWERPC_CPU(cs); 3190af81cf32SBharata B Rao DeviceClass *dc = DEVICE_GET_CLASS(cs); 31912e886fb3SSam Bobroff int id = spapr_vcpu_id(cpu); 3192af81cf32SBharata B Rao void *fdt; 3193af81cf32SBharata B Rao int offset, fdt_size; 3194af81cf32SBharata B Rao char *nodename; 3195af81cf32SBharata B Rao 3196af81cf32SBharata B Rao fdt = create_device_tree(&fdt_size); 3197af81cf32SBharata B Rao nodename = g_strdup_printf("%s@%x", dc->fw_name, id); 3198af81cf32SBharata B Rao offset = fdt_add_subnode(fdt, 0, nodename); 3199af81cf32SBharata B Rao 3200af81cf32SBharata B Rao spapr_populate_cpu_dt(cs, fdt, offset, spapr); 3201af81cf32SBharata B Rao g_free(nodename); 3202af81cf32SBharata B Rao 3203af81cf32SBharata B Rao *fdt_offset = offset; 3204af81cf32SBharata B Rao return fdt; 3205af81cf32SBharata B Rao } 3206af81cf32SBharata B Rao 3207765d1bddSDavid Gibson /* Callback to be called during DRC release. 
*/ 3208765d1bddSDavid Gibson void spapr_core_release(DeviceState *dev) 3209ff9006ddSIgor Mammedov { 3210765d1bddSDavid Gibson MachineState *ms = MACHINE(qdev_get_hotplug_handler(dev)); 321146f7afa3SGreg Kurz sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(ms); 3212ff9006ddSIgor Mammedov CPUCore *cc = CPU_CORE(dev); 3213535455fdSIgor Mammedov CPUArchId *core_slot = spapr_find_cpu_slot(ms, cc->core_id, NULL); 3214ff9006ddSIgor Mammedov 321546f7afa3SGreg Kurz if (smc->pre_2_10_has_unused_icps) { 321646f7afa3SGreg Kurz sPAPRCPUCore *sc = SPAPR_CPU_CORE(OBJECT(dev)); 321746f7afa3SGreg Kurz int i; 321846f7afa3SGreg Kurz 321946f7afa3SGreg Kurz for (i = 0; i < cc->nr_threads; i++) { 322094ad93bdSGreg Kurz CPUState *cs = CPU(sc->threads[i]); 322146f7afa3SGreg Kurz 322246f7afa3SGreg Kurz pre_2_10_vmstate_register_dummy_icp(cs->cpu_index); 322346f7afa3SGreg Kurz } 322446f7afa3SGreg Kurz } 322546f7afa3SGreg Kurz 322607572c06SGreg Kurz assert(core_slot); 3227535455fdSIgor Mammedov core_slot->cpu = NULL; 3228ff9006ddSIgor Mammedov object_unparent(OBJECT(dev)); 3229ff9006ddSIgor Mammedov } 3230ff9006ddSIgor Mammedov 3231115debf2SIgor Mammedov static 3232115debf2SIgor Mammedov void spapr_core_unplug_request(HotplugHandler *hotplug_dev, DeviceState *dev, 3233ff9006ddSIgor Mammedov Error **errp) 3234ff9006ddSIgor Mammedov { 3235535455fdSIgor Mammedov int index; 3236535455fdSIgor Mammedov sPAPRDRConnector *drc; 3237535455fdSIgor Mammedov CPUCore *cc = CPU_CORE(dev); 3238535455fdSIgor Mammedov int smt = kvmppc_smt_threads(); 3239ff9006ddSIgor Mammedov 3240535455fdSIgor Mammedov if (!spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index)) { 3241535455fdSIgor Mammedov error_setg(errp, "Unable to find CPU core with core-id: %d", 3242535455fdSIgor Mammedov cc->core_id); 3243535455fdSIgor Mammedov return; 3244535455fdSIgor Mammedov } 3245ff9006ddSIgor Mammedov if (index == 0) { 3246ff9006ddSIgor Mammedov error_setg(errp, "Boot CPU core may not be unplugged"); 3247ff9006ddSIgor 
Mammedov return; 3248ff9006ddSIgor Mammedov } 3249ff9006ddSIgor Mammedov 3250fbf55397SDavid Gibson drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU, index * smt); 3251ff9006ddSIgor Mammedov g_assert(drc); 3252ff9006ddSIgor Mammedov 3253a8dc47fdSDavid Gibson spapr_drc_detach(drc); 3254ff9006ddSIgor Mammedov 3255ff9006ddSIgor Mammedov spapr_hotplug_req_remove_by_index(drc); 3256ff9006ddSIgor Mammedov } 3257ff9006ddSIgor Mammedov 3258ff9006ddSIgor Mammedov static void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev, 3259ff9006ddSIgor Mammedov Error **errp) 3260ff9006ddSIgor Mammedov { 3261ff9006ddSIgor Mammedov sPAPRMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev)); 3262ff9006ddSIgor Mammedov MachineClass *mc = MACHINE_GET_CLASS(spapr); 326346f7afa3SGreg Kurz sPAPRMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 3264ff9006ddSIgor Mammedov sPAPRCPUCore *core = SPAPR_CPU_CORE(OBJECT(dev)); 3265ff9006ddSIgor Mammedov CPUCore *cc = CPU_CORE(dev); 326694ad93bdSGreg Kurz CPUState *cs = CPU(core->threads[0]); 3267ff9006ddSIgor Mammedov sPAPRDRConnector *drc; 3268ff9006ddSIgor Mammedov Error *local_err = NULL; 3269ff9006ddSIgor Mammedov int smt = kvmppc_smt_threads(); 3270535455fdSIgor Mammedov CPUArchId *core_slot; 3271535455fdSIgor Mammedov int index; 327294fd9cbaSLaurent Vivier bool hotplugged = spapr_drc_hotplugged(dev); 3273ff9006ddSIgor Mammedov 3274535455fdSIgor Mammedov core_slot = spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index); 3275535455fdSIgor Mammedov if (!core_slot) { 3276535455fdSIgor Mammedov error_setg(errp, "Unable to find CPU core with core-id: %d", 3277535455fdSIgor Mammedov cc->core_id); 3278535455fdSIgor Mammedov return; 3279535455fdSIgor Mammedov } 3280fbf55397SDavid Gibson drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU, index * smt); 3281ff9006ddSIgor Mammedov 3282c5514d0eSIgor Mammedov g_assert(drc || !mc->has_hotpluggable_cpus); 3283ff9006ddSIgor Mammedov 3284e49c63d5SGreg Kurz if (drc) { 3285e49c63d5SGreg Kurz void *fdt; 
3286e49c63d5SGreg Kurz int fdt_offset; 3287e49c63d5SGreg Kurz 3288ff9006ddSIgor Mammedov fdt = spapr_populate_hotplug_cpu_dt(cs, &fdt_offset, spapr); 3289ff9006ddSIgor Mammedov 32905c1da812SDavid Gibson spapr_drc_attach(drc, dev, fdt, fdt_offset, &local_err); 3291ff9006ddSIgor Mammedov if (local_err) { 3292ff9006ddSIgor Mammedov g_free(fdt); 3293ff9006ddSIgor Mammedov error_propagate(errp, local_err); 3294ff9006ddSIgor Mammedov return; 3295ff9006ddSIgor Mammedov } 3296ff9006ddSIgor Mammedov 329794fd9cbaSLaurent Vivier if (hotplugged) { 3298ff9006ddSIgor Mammedov /* 329994fd9cbaSLaurent Vivier * Send hotplug notification interrupt to the guest only 330094fd9cbaSLaurent Vivier * in case of hotplugged CPUs. 3301ff9006ddSIgor Mammedov */ 3302ff9006ddSIgor Mammedov spapr_hotplug_req_add_by_index(drc); 330394fd9cbaSLaurent Vivier } else { 330494fd9cbaSLaurent Vivier spapr_drc_reset(drc); 3305ff9006ddSIgor Mammedov } 330694fd9cbaSLaurent Vivier } 330794fd9cbaSLaurent Vivier 3308535455fdSIgor Mammedov core_slot->cpu = OBJECT(dev); 330946f7afa3SGreg Kurz 331046f7afa3SGreg Kurz if (smc->pre_2_10_has_unused_icps) { 331146f7afa3SGreg Kurz int i; 331246f7afa3SGreg Kurz 331346f7afa3SGreg Kurz for (i = 0; i < cc->nr_threads; i++) { 331446f7afa3SGreg Kurz sPAPRCPUCore *sc = SPAPR_CPU_CORE(dev); 331546f7afa3SGreg Kurz 331694ad93bdSGreg Kurz cs = CPU(sc->threads[i]); 331746f7afa3SGreg Kurz pre_2_10_vmstate_unregister_dummy_icp(cs->cpu_index); 331846f7afa3SGreg Kurz } 331946f7afa3SGreg Kurz } 3320ff9006ddSIgor Mammedov } 3321ff9006ddSIgor Mammedov 3322ff9006ddSIgor Mammedov static void spapr_core_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev, 3323ff9006ddSIgor Mammedov Error **errp) 3324ff9006ddSIgor Mammedov { 3325ff9006ddSIgor Mammedov MachineState *machine = MACHINE(OBJECT(hotplug_dev)); 3326ff9006ddSIgor Mammedov MachineClass *mc = MACHINE_GET_CLASS(hotplug_dev); 3327ff9006ddSIgor Mammedov Error *local_err = NULL; 3328ff9006ddSIgor Mammedov CPUCore *cc = CPU_CORE(dev); 
33292e9c10ebSIgor Mammedov const char *base_core_type = spapr_get_cpu_core_type(machine->cpu_type); 3330ff9006ddSIgor Mammedov const char *type = object_get_typename(OBJECT(dev)); 3331535455fdSIgor Mammedov CPUArchId *core_slot; 3332535455fdSIgor Mammedov int index; 3333ff9006ddSIgor Mammedov 3334c5514d0eSIgor Mammedov if (dev->hotplugged && !mc->has_hotpluggable_cpus) { 3335ff9006ddSIgor Mammedov error_setg(&local_err, "CPU hotplug not supported for this machine"); 3336ff9006ddSIgor Mammedov goto out; 3337ff9006ddSIgor Mammedov } 3338ff9006ddSIgor Mammedov 3339ff9006ddSIgor Mammedov if (strcmp(base_core_type, type)) { 3340ff9006ddSIgor Mammedov error_setg(&local_err, "CPU core type should be %s", base_core_type); 3341ff9006ddSIgor Mammedov goto out; 3342ff9006ddSIgor Mammedov } 3343ff9006ddSIgor Mammedov 3344ff9006ddSIgor Mammedov if (cc->core_id % smp_threads) { 3345ff9006ddSIgor Mammedov error_setg(&local_err, "invalid core id %d", cc->core_id); 3346ff9006ddSIgor Mammedov goto out; 3347ff9006ddSIgor Mammedov } 3348ff9006ddSIgor Mammedov 3349459264efSDavid Gibson /* 3350459264efSDavid Gibson * In general we should have homogeneous threads-per-core, but old 3351459264efSDavid Gibson * (pre hotplug support) machine types allow the last core to have 3352459264efSDavid Gibson * reduced threads as a compatibility hack for when we allowed 3353459264efSDavid Gibson * total vcpus not a multiple of threads-per-core. 
3354459264efSDavid Gibson */ 3355459264efSDavid Gibson if (mc->has_hotpluggable_cpus && (cc->nr_threads != smp_threads)) { 3356df8658deSGreg Kurz error_setg(&local_err, "invalid nr-threads %d, must be %d", 33578149e299SDavid Gibson cc->nr_threads, smp_threads); 3358df8658deSGreg Kurz goto out; 33598149e299SDavid Gibson } 33608149e299SDavid Gibson 3361535455fdSIgor Mammedov core_slot = spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index); 3362535455fdSIgor Mammedov if (!core_slot) { 3363ff9006ddSIgor Mammedov error_setg(&local_err, "core id %d out of range", cc->core_id); 3364ff9006ddSIgor Mammedov goto out; 3365ff9006ddSIgor Mammedov } 3366ff9006ddSIgor Mammedov 3367535455fdSIgor Mammedov if (core_slot->cpu) { 3368ff9006ddSIgor Mammedov error_setg(&local_err, "core %d already populated", cc->core_id); 3369ff9006ddSIgor Mammedov goto out; 3370ff9006ddSIgor Mammedov } 3371ff9006ddSIgor Mammedov 3372a0ceb640SIgor Mammedov numa_cpu_pre_plug(core_slot, dev, &local_err); 33730b8497f0SIgor Mammedov 3374ff9006ddSIgor Mammedov out: 3375ff9006ddSIgor Mammedov error_propagate(errp, local_err); 3376ff9006ddSIgor Mammedov } 3377ff9006ddSIgor Mammedov 3378c20d332aSBharata B Rao static void spapr_machine_device_plug(HotplugHandler *hotplug_dev, 3379c20d332aSBharata B Rao DeviceState *dev, Error **errp) 3380c20d332aSBharata B Rao { 3381c86c1affSDaniel Henrique Barboza MachineState *ms = MACHINE(hotplug_dev); 3382c86c1affSDaniel Henrique Barboza sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(ms); 3383c20d332aSBharata B Rao 3384c20d332aSBharata B Rao if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { 3385b556854bSBharata B Rao int node; 3386c20d332aSBharata B Rao 3387c20d332aSBharata B Rao if (!smc->dr_lmb_enabled) { 3388c20d332aSBharata B Rao error_setg(errp, "Memory hotplug not supported for this machine"); 3389c20d332aSBharata B Rao return; 3390c20d332aSBharata B Rao } 33919ed442b8SMarc-André Lureau node = object_property_get_uint(OBJECT(dev), PC_DIMM_NODE_PROP, 
errp); 3392c20d332aSBharata B Rao if (*errp) { 3393c20d332aSBharata B Rao return; 3394c20d332aSBharata B Rao } 33951a5512bbSGonglei if (node < 0 || node >= MAX_NODES) { 33961a5512bbSGonglei error_setg(errp, "Invaild node %d", node); 33971a5512bbSGonglei return; 33981a5512bbSGonglei } 3399c20d332aSBharata B Rao 3400b556854bSBharata B Rao /* 3401b556854bSBharata B Rao * Currently PowerPC kernel doesn't allow hot-adding memory to 3402b556854bSBharata B Rao * memory-less node, but instead will silently add the memory 3403b556854bSBharata B Rao * to the first node that has some memory. This causes two 3404b556854bSBharata B Rao * unexpected behaviours for the user. 3405b556854bSBharata B Rao * 3406b556854bSBharata B Rao * - Memory gets hotplugged to a different node than what the user 3407b556854bSBharata B Rao * specified. 3408b556854bSBharata B Rao * - Since pc-dimm subsystem in QEMU still thinks that memory belongs 3409b556854bSBharata B Rao * to memory-less node, a reboot will set things accordingly 3410b556854bSBharata B Rao * and the previously hotplugged memory now ends in the right node. 3411b556854bSBharata B Rao * This appears as if some memory moved from one node to another. 3412b556854bSBharata B Rao * 3413b556854bSBharata B Rao * So until kernel starts supporting memory hotplug to memory-less 3414b556854bSBharata B Rao * nodes, just prevent such attempts upfront in QEMU. 
3415b556854bSBharata B Rao */ 3416b556854bSBharata B Rao if (nb_numa_nodes && !numa_info[node].node_mem) { 3417b556854bSBharata B Rao error_setg(errp, "Can't hotplug memory to memory-less node %d", 3418b556854bSBharata B Rao node); 3419b556854bSBharata B Rao return; 3420b556854bSBharata B Rao } 3421b556854bSBharata B Rao 3422c20d332aSBharata B Rao spapr_memory_plug(hotplug_dev, dev, node, errp); 3423af81cf32SBharata B Rao } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) { 3424af81cf32SBharata B Rao spapr_core_plug(hotplug_dev, dev, errp); 3425c20d332aSBharata B Rao } 3426c20d332aSBharata B Rao } 3427c20d332aSBharata B Rao 3428cf632463SBharata B Rao static void spapr_machine_device_unplug_request(HotplugHandler *hotplug_dev, 3429cf632463SBharata B Rao DeviceState *dev, Error **errp) 3430cf632463SBharata B Rao { 3431c86c1affSDaniel Henrique Barboza sPAPRMachineState *sms = SPAPR_MACHINE(OBJECT(hotplug_dev)); 3432c86c1affSDaniel Henrique Barboza MachineClass *mc = MACHINE_GET_CLASS(sms); 3433cf632463SBharata B Rao 3434cf632463SBharata B Rao if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { 3435cf632463SBharata B Rao if (spapr_ovec_test(sms->ov5_cas, OV5_HP_EVT)) { 3436cf632463SBharata B Rao spapr_memory_unplug_request(hotplug_dev, dev, errp); 3437cf632463SBharata B Rao } else { 3438cf632463SBharata B Rao /* NOTE: this means there is a window after guest reset, prior to 3439cf632463SBharata B Rao * CAS negotiation, where unplug requests will fail due to the 3440cf632463SBharata B Rao * capability not being detected yet. 
This is a bit different than 3441cf632463SBharata B Rao * the case with PCI unplug, where the events will be queued and 3442cf632463SBharata B Rao * eventually handled by the guest after boot 3443cf632463SBharata B Rao */ 3444cf632463SBharata B Rao error_setg(errp, "Memory hot unplug not supported for this guest"); 3445cf632463SBharata B Rao } 34466f4b5c3eSBharata B Rao } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) { 3447c5514d0eSIgor Mammedov if (!mc->has_hotpluggable_cpus) { 34486f4b5c3eSBharata B Rao error_setg(errp, "CPU hot unplug not supported on this machine"); 34496f4b5c3eSBharata B Rao return; 34506f4b5c3eSBharata B Rao } 3451115debf2SIgor Mammedov spapr_core_unplug_request(hotplug_dev, dev, errp); 3452c20d332aSBharata B Rao } 3453c20d332aSBharata B Rao } 3454c20d332aSBharata B Rao 345594a94e4cSBharata B Rao static void spapr_machine_device_pre_plug(HotplugHandler *hotplug_dev, 345694a94e4cSBharata B Rao DeviceState *dev, Error **errp) 345794a94e4cSBharata B Rao { 3458c871bc70SLaurent Vivier if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { 3459c871bc70SLaurent Vivier spapr_memory_pre_plug(hotplug_dev, dev, errp); 3460c871bc70SLaurent Vivier } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) { 346194a94e4cSBharata B Rao spapr_core_pre_plug(hotplug_dev, dev, errp); 346294a94e4cSBharata B Rao } 346394a94e4cSBharata B Rao } 346494a94e4cSBharata B Rao 34657ebaf795SBharata B Rao static HotplugHandler *spapr_get_hotplug_handler(MachineState *machine, 3466c20d332aSBharata B Rao DeviceState *dev) 3467c20d332aSBharata B Rao { 346894a94e4cSBharata B Rao if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM) || 346994a94e4cSBharata B Rao object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) { 3470c20d332aSBharata B Rao return HOTPLUG_HANDLER(machine); 3471c20d332aSBharata B Rao } 3472c20d332aSBharata B Rao return NULL; 3473c20d332aSBharata B Rao } 3474c20d332aSBharata B Rao 3475ea089eebSIgor Mammedov static CpuInstanceProperties 
3476ea089eebSIgor Mammedov spapr_cpu_index_to_props(MachineState *machine, unsigned cpu_index) 347720bb648dSDavid Gibson { 3478ea089eebSIgor Mammedov CPUArchId *core_slot; 3479ea089eebSIgor Mammedov MachineClass *mc = MACHINE_GET_CLASS(machine); 3480ea089eebSIgor Mammedov 3481ea089eebSIgor Mammedov /* make sure possible_cpu are intialized */ 3482ea089eebSIgor Mammedov mc->possible_cpu_arch_ids(machine); 3483ea089eebSIgor Mammedov /* get CPU core slot containing thread that matches cpu_index */ 3484ea089eebSIgor Mammedov core_slot = spapr_find_cpu_slot(machine, cpu_index, NULL); 3485ea089eebSIgor Mammedov assert(core_slot); 3486ea089eebSIgor Mammedov return core_slot->props; 348720bb648dSDavid Gibson } 348820bb648dSDavid Gibson 348979e07936SIgor Mammedov static int64_t spapr_get_default_cpu_node_id(const MachineState *ms, int idx) 349079e07936SIgor Mammedov { 349179e07936SIgor Mammedov return idx / smp_cores % nb_numa_nodes; 349279e07936SIgor Mammedov } 349379e07936SIgor Mammedov 3494535455fdSIgor Mammedov static const CPUArchIdList *spapr_possible_cpu_arch_ids(MachineState *machine) 3495535455fdSIgor Mammedov { 3496535455fdSIgor Mammedov int i; 3497535455fdSIgor Mammedov int spapr_max_cores = max_cpus / smp_threads; 3498535455fdSIgor Mammedov MachineClass *mc = MACHINE_GET_CLASS(machine); 3499535455fdSIgor Mammedov 3500c5514d0eSIgor Mammedov if (!mc->has_hotpluggable_cpus) { 3501535455fdSIgor Mammedov spapr_max_cores = QEMU_ALIGN_UP(smp_cpus, smp_threads) / smp_threads; 3502535455fdSIgor Mammedov } 3503535455fdSIgor Mammedov if (machine->possible_cpus) { 3504535455fdSIgor Mammedov assert(machine->possible_cpus->len == spapr_max_cores); 3505535455fdSIgor Mammedov return machine->possible_cpus; 3506535455fdSIgor Mammedov } 3507535455fdSIgor Mammedov 3508535455fdSIgor Mammedov machine->possible_cpus = g_malloc0(sizeof(CPUArchIdList) + 3509535455fdSIgor Mammedov sizeof(CPUArchId) * spapr_max_cores); 3510535455fdSIgor Mammedov machine->possible_cpus->len = 
spapr_max_cores; 3511535455fdSIgor Mammedov for (i = 0; i < machine->possible_cpus->len; i++) { 3512535455fdSIgor Mammedov int core_id = i * smp_threads; 3513535455fdSIgor Mammedov 3514f2d672c2SIgor Mammedov machine->possible_cpus->cpus[i].vcpus_count = smp_threads; 3515535455fdSIgor Mammedov machine->possible_cpus->cpus[i].arch_id = core_id; 3516535455fdSIgor Mammedov machine->possible_cpus->cpus[i].props.has_core_id = true; 3517535455fdSIgor Mammedov machine->possible_cpus->cpus[i].props.core_id = core_id; 3518535455fdSIgor Mammedov } 3519535455fdSIgor Mammedov return machine->possible_cpus; 3520535455fdSIgor Mammedov } 3521535455fdSIgor Mammedov 35226737d9adSDavid Gibson static void spapr_phb_placement(sPAPRMachineState *spapr, uint32_t index, 3523daa23699SDavid Gibson uint64_t *buid, hwaddr *pio, 3524daa23699SDavid Gibson hwaddr *mmio32, hwaddr *mmio64, 35256737d9adSDavid Gibson unsigned n_dma, uint32_t *liobns, Error **errp) 35266737d9adSDavid Gibson { 3527357d1e3bSDavid Gibson /* 3528357d1e3bSDavid Gibson * New-style PHB window placement. 3529357d1e3bSDavid Gibson * 3530357d1e3bSDavid Gibson * Goals: Gives large (1TiB), naturally aligned 64-bit MMIO window 3531357d1e3bSDavid Gibson * for each PHB, in addition to 2GiB 32-bit MMIO and 64kiB PIO 3532357d1e3bSDavid Gibson * windows. 3533357d1e3bSDavid Gibson * 3534357d1e3bSDavid Gibson * Some guest kernels can't work with MMIO windows above 1<<46 3535357d1e3bSDavid Gibson * (64TiB), so we place up to 31 PHBs in the area 32TiB..64TiB 3536357d1e3bSDavid Gibson * 3537357d1e3bSDavid Gibson * 32TiB..(33TiB+1984kiB) contains the 64kiB PIO windows for each 3538357d1e3bSDavid Gibson * PHB stacked together. (32TiB+2GiB)..(32TiB+64GiB) contains the 3539357d1e3bSDavid Gibson * 2GiB 32-bit MMIO windows for each PHB. Then 33..64TiB has the 3540357d1e3bSDavid Gibson * 1TiB 64-bit MMIO windows for each PHB. 
3541357d1e3bSDavid Gibson */ 35426737d9adSDavid Gibson const uint64_t base_buid = 0x800000020000000ULL; 354325e6a118SMichael S. Tsirkin #define SPAPR_MAX_PHBS ((SPAPR_PCI_LIMIT - SPAPR_PCI_BASE) / \ 354425e6a118SMichael S. Tsirkin SPAPR_PCI_MEM64_WIN_SIZE - 1) 35456737d9adSDavid Gibson int i; 35466737d9adSDavid Gibson 3547357d1e3bSDavid Gibson /* Sanity check natural alignments */ 3548357d1e3bSDavid Gibson QEMU_BUILD_BUG_ON((SPAPR_PCI_BASE % SPAPR_PCI_MEM64_WIN_SIZE) != 0); 3549357d1e3bSDavid Gibson QEMU_BUILD_BUG_ON((SPAPR_PCI_LIMIT % SPAPR_PCI_MEM64_WIN_SIZE) != 0); 3550357d1e3bSDavid Gibson QEMU_BUILD_BUG_ON((SPAPR_PCI_MEM64_WIN_SIZE % SPAPR_PCI_MEM32_WIN_SIZE) != 0); 3551357d1e3bSDavid Gibson QEMU_BUILD_BUG_ON((SPAPR_PCI_MEM32_WIN_SIZE % SPAPR_PCI_IO_WIN_SIZE) != 0); 3552357d1e3bSDavid Gibson /* Sanity check bounds */ 355325e6a118SMichael S. Tsirkin QEMU_BUILD_BUG_ON((SPAPR_MAX_PHBS * SPAPR_PCI_IO_WIN_SIZE) > 355425e6a118SMichael S. Tsirkin SPAPR_PCI_MEM32_WIN_SIZE); 355525e6a118SMichael S. Tsirkin QEMU_BUILD_BUG_ON((SPAPR_MAX_PHBS * SPAPR_PCI_MEM32_WIN_SIZE) > 355625e6a118SMichael S. Tsirkin SPAPR_PCI_MEM64_WIN_SIZE); 35572efff1c0SDavid Gibson 355825e6a118SMichael S. Tsirkin if (index >= SPAPR_MAX_PHBS) { 355925e6a118SMichael S. Tsirkin error_setg(errp, "\"index\" for PAPR PHB is too large (max %llu)", 356025e6a118SMichael S. 
Tsirkin SPAPR_MAX_PHBS - 1); 35616737d9adSDavid Gibson return; 35626737d9adSDavid Gibson } 35636737d9adSDavid Gibson 35646737d9adSDavid Gibson *buid = base_buid + index; 35656737d9adSDavid Gibson for (i = 0; i < n_dma; ++i) { 35666737d9adSDavid Gibson liobns[i] = SPAPR_PCI_LIOBN(index, i); 35676737d9adSDavid Gibson } 35686737d9adSDavid Gibson 3569357d1e3bSDavid Gibson *pio = SPAPR_PCI_BASE + index * SPAPR_PCI_IO_WIN_SIZE; 3570357d1e3bSDavid Gibson *mmio32 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM32_WIN_SIZE; 3571357d1e3bSDavid Gibson *mmio64 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM64_WIN_SIZE; 35726737d9adSDavid Gibson } 35736737d9adSDavid Gibson 35747844e12bSCédric Le Goater static ICSState *spapr_ics_get(XICSFabric *dev, int irq) 35757844e12bSCédric Le Goater { 35767844e12bSCédric Le Goater sPAPRMachineState *spapr = SPAPR_MACHINE(dev); 35777844e12bSCédric Le Goater 35787844e12bSCédric Le Goater return ics_valid_irq(spapr->ics, irq) ? spapr->ics : NULL; 35797844e12bSCédric Le Goater } 35807844e12bSCédric Le Goater 35817844e12bSCédric Le Goater static void spapr_ics_resend(XICSFabric *dev) 35827844e12bSCédric Le Goater { 35837844e12bSCédric Le Goater sPAPRMachineState *spapr = SPAPR_MACHINE(dev); 35847844e12bSCédric Le Goater 35857844e12bSCédric Le Goater ics_resend(spapr->ics); 35867844e12bSCédric Le Goater } 35877844e12bSCédric Le Goater 358881210c20SSam Bobroff static ICPState *spapr_icp_get(XICSFabric *xi, int vcpu_id) 3589b2fc59aaSCédric Le Goater { 35902e886fb3SSam Bobroff PowerPCCPU *cpu = spapr_find_cpu(vcpu_id); 3591b2fc59aaSCédric Le Goater 35925bc8d26dSCédric Le Goater return cpu ? 
ICP(cpu->intc) : NULL; 3593b2fc59aaSCédric Le Goater } 3594b2fc59aaSCédric Le Goater 359560c6823bSCédric Le Goater #define ICS_IRQ_FREE(ics, srcno) \ 359660c6823bSCédric Le Goater (!((ics)->irqs[(srcno)].flags & (XICS_FLAGS_IRQ_MASK))) 359760c6823bSCédric Le Goater 359860c6823bSCédric Le Goater static int ics_find_free_block(ICSState *ics, int num, int alignnum) 359960c6823bSCédric Le Goater { 360060c6823bSCédric Le Goater int first, i; 360160c6823bSCédric Le Goater 360260c6823bSCédric Le Goater for (first = 0; first < ics->nr_irqs; first += alignnum) { 360360c6823bSCédric Le Goater if (num > (ics->nr_irqs - first)) { 360460c6823bSCédric Le Goater return -1; 360560c6823bSCédric Le Goater } 360660c6823bSCédric Le Goater for (i = first; i < first + num; ++i) { 360760c6823bSCédric Le Goater if (!ICS_IRQ_FREE(ics, i)) { 360860c6823bSCédric Le Goater break; 360960c6823bSCédric Le Goater } 361060c6823bSCédric Le Goater } 361160c6823bSCédric Le Goater if (i == (first + num)) { 361260c6823bSCédric Le Goater return first; 361360c6823bSCédric Le Goater } 361460c6823bSCédric Le Goater } 361560c6823bSCédric Le Goater 361660c6823bSCédric Le Goater return -1; 361760c6823bSCédric Le Goater } 361860c6823bSCédric Le Goater 36199e7dc5fcSCédric Le Goater /* 36209e7dc5fcSCédric Le Goater * Allocate the IRQ number and set the IRQ type, LSI or MSI 36219e7dc5fcSCédric Le Goater */ 36229e7dc5fcSCédric Le Goater static void spapr_irq_set_lsi(sPAPRMachineState *spapr, int irq, bool lsi) 36239e7dc5fcSCédric Le Goater { 36249e7dc5fcSCédric Le Goater ics_set_irq_type(spapr->ics, irq - spapr->ics->offset, lsi); 36259e7dc5fcSCédric Le Goater } 36269e7dc5fcSCédric Le Goater 362760c6823bSCédric Le Goater int spapr_irq_alloc(sPAPRMachineState *spapr, int irq_hint, bool lsi, 362860c6823bSCédric Le Goater Error **errp) 362960c6823bSCédric Le Goater { 363060c6823bSCédric Le Goater ICSState *ics = spapr->ics; 363160c6823bSCédric Le Goater int irq; 363260c6823bSCédric Le Goater 363360c6823bSCédric Le 
Goater if (!ics) { 363460c6823bSCédric Le Goater return -1; 363560c6823bSCédric Le Goater } 363660c6823bSCédric Le Goater if (irq_hint) { 363760c6823bSCédric Le Goater if (!ICS_IRQ_FREE(ics, irq_hint - ics->offset)) { 363860c6823bSCédric Le Goater error_setg(errp, "can't allocate IRQ %d: already in use", irq_hint); 363960c6823bSCédric Le Goater return -1; 364060c6823bSCédric Le Goater } 364160c6823bSCédric Le Goater irq = irq_hint; 364260c6823bSCédric Le Goater } else { 364360c6823bSCédric Le Goater irq = ics_find_free_block(ics, 1, 1); 364460c6823bSCédric Le Goater if (irq < 0) { 364560c6823bSCédric Le Goater error_setg(errp, "can't allocate IRQ: no IRQ left"); 364660c6823bSCédric Le Goater return -1; 364760c6823bSCédric Le Goater } 364860c6823bSCédric Le Goater irq += ics->offset; 364960c6823bSCédric Le Goater } 365060c6823bSCédric Le Goater 36519e7dc5fcSCédric Le Goater spapr_irq_set_lsi(spapr, irq, lsi); 365260c6823bSCédric Le Goater trace_spapr_irq_alloc(irq); 365360c6823bSCédric Le Goater 365460c6823bSCédric Le Goater return irq; 365560c6823bSCédric Le Goater } 365660c6823bSCédric Le Goater 365760c6823bSCédric Le Goater /* 365860c6823bSCédric Le Goater * Allocate block of consecutive IRQs, and return the number of the first IRQ in 365960c6823bSCédric Le Goater * the block. If align==true, aligns the first IRQ number to num. 
366060c6823bSCédric Le Goater */ 366160c6823bSCédric Le Goater int spapr_irq_alloc_block(sPAPRMachineState *spapr, int num, bool lsi, 366260c6823bSCédric Le Goater bool align, Error **errp) 366360c6823bSCédric Le Goater { 366460c6823bSCédric Le Goater ICSState *ics = spapr->ics; 366560c6823bSCédric Le Goater int i, first = -1; 366660c6823bSCédric Le Goater 366760c6823bSCédric Le Goater if (!ics) { 366860c6823bSCédric Le Goater return -1; 366960c6823bSCédric Le Goater } 367060c6823bSCédric Le Goater 367160c6823bSCédric Le Goater /* 367260c6823bSCédric Le Goater * MSIMesage::data is used for storing VIRQ so 367360c6823bSCédric Le Goater * it has to be aligned to num to support multiple 367460c6823bSCédric Le Goater * MSI vectors. MSI-X is not affected by this. 367560c6823bSCédric Le Goater * The hint is used for the first IRQ, the rest should 367660c6823bSCédric Le Goater * be allocated continuously. 367760c6823bSCédric Le Goater */ 367860c6823bSCédric Le Goater if (align) { 367960c6823bSCédric Le Goater assert((num == 1) || (num == 2) || (num == 4) || 368060c6823bSCédric Le Goater (num == 8) || (num == 16) || (num == 32)); 368160c6823bSCédric Le Goater first = ics_find_free_block(ics, num, num); 368260c6823bSCédric Le Goater } else { 368360c6823bSCédric Le Goater first = ics_find_free_block(ics, num, 1); 368460c6823bSCédric Le Goater } 368560c6823bSCédric Le Goater if (first < 0) { 368660c6823bSCédric Le Goater error_setg(errp, "can't find a free %d-IRQ block", num); 368760c6823bSCédric Le Goater return -1; 368860c6823bSCédric Le Goater } 368960c6823bSCédric Le Goater 369060c6823bSCédric Le Goater first += ics->offset; 36919e7dc5fcSCédric Le Goater for (i = first; i < first + num; ++i) { 36929e7dc5fcSCédric Le Goater spapr_irq_set_lsi(spapr, i, lsi); 36939e7dc5fcSCédric Le Goater } 369460c6823bSCédric Le Goater 369560c6823bSCédric Le Goater trace_spapr_irq_alloc_block(first, num, lsi, align); 369660c6823bSCédric Le Goater 369760c6823bSCédric Le Goater return first; 
369860c6823bSCédric Le Goater } 369960c6823bSCédric Le Goater 370060c6823bSCédric Le Goater void spapr_irq_free(sPAPRMachineState *spapr, int irq, int num) 370160c6823bSCédric Le Goater { 370260c6823bSCédric Le Goater ICSState *ics = spapr->ics; 370360c6823bSCédric Le Goater int srcno = irq - ics->offset; 370460c6823bSCédric Le Goater int i; 370560c6823bSCédric Le Goater 370660c6823bSCédric Le Goater if (ics_valid_irq(ics, irq)) { 370760c6823bSCédric Le Goater trace_spapr_irq_free(0, irq, num); 370860c6823bSCédric Le Goater for (i = srcno; i < srcno + num; ++i) { 370960c6823bSCédric Le Goater if (ICS_IRQ_FREE(ics, i)) { 371060c6823bSCédric Le Goater trace_spapr_irq_free_warn(0, i + ics->offset); 371160c6823bSCédric Le Goater } 371260c6823bSCédric Le Goater memset(&ics->irqs[i], 0, sizeof(ICSIRQState)); 371360c6823bSCédric Le Goater } 371460c6823bSCédric Le Goater } 371560c6823bSCédric Le Goater } 371660c6823bSCédric Le Goater 371777183755SCédric Le Goater qemu_irq spapr_qirq(sPAPRMachineState *spapr, int irq) 371877183755SCédric Le Goater { 371977183755SCédric Le Goater ICSState *ics = spapr->ics; 372077183755SCédric Le Goater 372177183755SCédric Le Goater if (ics_valid_irq(ics, irq)) { 372277183755SCédric Le Goater return ics->qirqs[irq - ics->offset]; 372377183755SCédric Le Goater } 372477183755SCédric Le Goater 372577183755SCédric Le Goater return NULL; 372677183755SCédric Le Goater } 372777183755SCédric Le Goater 37286449da45SCédric Le Goater static void spapr_pic_print_info(InterruptStatsProvider *obj, 37296449da45SCédric Le Goater Monitor *mon) 37306449da45SCédric Le Goater { 37316449da45SCédric Le Goater sPAPRMachineState *spapr = SPAPR_MACHINE(obj); 37325bc8d26dSCédric Le Goater CPUState *cs; 37336449da45SCédric Le Goater 37345bc8d26dSCédric Le Goater CPU_FOREACH(cs) { 37355bc8d26dSCédric Le Goater PowerPCCPU *cpu = POWERPC_CPU(cs); 37365bc8d26dSCédric Le Goater 37375bc8d26dSCédric Le Goater icp_pic_print_info(ICP(cpu->intc), mon); 37386449da45SCédric Le 
Goater } 37396449da45SCédric Le Goater 37406449da45SCédric Le Goater ics_pic_print_info(spapr->ics, mon); 37416449da45SCédric Le Goater } 37426449da45SCédric Le Goater 37432e886fb3SSam Bobroff int spapr_vcpu_id(PowerPCCPU *cpu) 37442e886fb3SSam Bobroff { 37452e886fb3SSam Bobroff CPUState *cs = CPU(cpu); 37462e886fb3SSam Bobroff 37472e886fb3SSam Bobroff if (kvm_enabled()) { 37482e886fb3SSam Bobroff return kvm_arch_vcpu_id(cs); 37492e886fb3SSam Bobroff } else { 37502e886fb3SSam Bobroff return cs->cpu_index; 37512e886fb3SSam Bobroff } 37522e886fb3SSam Bobroff } 37532e886fb3SSam Bobroff 37542e886fb3SSam Bobroff PowerPCCPU *spapr_find_cpu(int vcpu_id) 37552e886fb3SSam Bobroff { 37562e886fb3SSam Bobroff CPUState *cs; 37572e886fb3SSam Bobroff 37582e886fb3SSam Bobroff CPU_FOREACH(cs) { 37592e886fb3SSam Bobroff PowerPCCPU *cpu = POWERPC_CPU(cs); 37602e886fb3SSam Bobroff 37612e886fb3SSam Bobroff if (spapr_vcpu_id(cpu) == vcpu_id) { 37622e886fb3SSam Bobroff return cpu; 37632e886fb3SSam Bobroff } 37642e886fb3SSam Bobroff } 37652e886fb3SSam Bobroff 37662e886fb3SSam Bobroff return NULL; 37672e886fb3SSam Bobroff } 37682e886fb3SSam Bobroff 376929ee3247SAlexey Kardashevskiy static void spapr_machine_class_init(ObjectClass *oc, void *data) 377053018216SPaolo Bonzini { 377129ee3247SAlexey Kardashevskiy MachineClass *mc = MACHINE_CLASS(oc); 3772224245bfSDavid Gibson sPAPRMachineClass *smc = SPAPR_MACHINE_CLASS(oc); 377371461b0fSAlexey Kardashevskiy FWPathProviderClass *fwc = FW_PATH_PROVIDER_CLASS(oc); 377434316482SAlexey Kardashevskiy NMIClass *nc = NMI_CLASS(oc); 3775c20d332aSBharata B Rao HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc); 37761d1be34dSDavid Gibson PPCVirtualHypervisorClass *vhc = PPC_VIRTUAL_HYPERVISOR_CLASS(oc); 37777844e12bSCédric Le Goater XICSFabricClass *xic = XICS_FABRIC_CLASS(oc); 37786449da45SCédric Le Goater InterruptStatsProviderClass *ispc = INTERRUPT_STATS_PROVIDER_CLASS(oc); 377929ee3247SAlexey Kardashevskiy 37800eb9054cSDavid Gibson mc->desc = 
"pSeries Logical Partition (PAPR compliant)"; 3781fc9f38c3SDavid Gibson 3782fc9f38c3SDavid Gibson /* 3783fc9f38c3SDavid Gibson * We set up the default / latest behaviour here. The class_init 3784fc9f38c3SDavid Gibson * functions for the specific versioned machine types can override 3785fc9f38c3SDavid Gibson * these details for backwards compatibility 3786fc9f38c3SDavid Gibson */ 3787bcb5ce08SDavid Gibson mc->init = spapr_machine_init; 3788bcb5ce08SDavid Gibson mc->reset = spapr_machine_reset; 3789958db90cSMarcel Apfelbaum mc->block_default_type = IF_SCSI; 37906244bb7eSGreg Kurz mc->max_cpus = 1024; 3791958db90cSMarcel Apfelbaum mc->no_parallel = 1; 37925b2128d2SAlexander Graf mc->default_boot_order = ""; 3793a34944feSNikunj A Dadhania mc->default_ram_size = 512 * M_BYTE; 3794958db90cSMarcel Apfelbaum mc->kvm_type = spapr_kvm_type; 37959e3f9733SAlexander Graf mc->has_dynamic_sysbus = true; 3796e4024630SLaurent Vivier mc->pci_allow_0_address = true; 37977ebaf795SBharata B Rao mc->get_hotplug_handler = spapr_get_hotplug_handler; 379894a94e4cSBharata B Rao hc->pre_plug = spapr_machine_device_pre_plug; 3799c20d332aSBharata B Rao hc->plug = spapr_machine_device_plug; 3800ea089eebSIgor Mammedov mc->cpu_index_to_instance_props = spapr_cpu_index_to_props; 380179e07936SIgor Mammedov mc->get_default_cpu_node_id = spapr_get_default_cpu_node_id; 3802535455fdSIgor Mammedov mc->possible_cpu_arch_ids = spapr_possible_cpu_arch_ids; 3803cf632463SBharata B Rao hc->unplug_request = spapr_machine_device_unplug_request; 380400b4fbe2SMarcel Apfelbaum 3805fc9f38c3SDavid Gibson smc->dr_lmb_enabled = true; 38062e9c10ebSIgor Mammedov mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power8_v2.0"); 3807c5514d0eSIgor Mammedov mc->has_hotpluggable_cpus = true; 380852b81ab5SDavid Gibson smc->resize_hpt_default = SPAPR_RESIZE_HPT_ENABLED; 380971461b0fSAlexey Kardashevskiy fwc->get_dev_path = spapr_get_fw_dev_path; 381034316482SAlexey Kardashevskiy nc->nmi_monitor_handler = spapr_nmi; 
38116737d9adSDavid Gibson smc->phb_placement = spapr_phb_placement; 38121d1be34dSDavid Gibson vhc->hypercall = emulate_spapr_hypercall; 3813e57ca75cSDavid Gibson vhc->hpt_mask = spapr_hpt_mask; 3814e57ca75cSDavid Gibson vhc->map_hptes = spapr_map_hptes; 3815e57ca75cSDavid Gibson vhc->unmap_hptes = spapr_unmap_hptes; 3816e57ca75cSDavid Gibson vhc->store_hpte = spapr_store_hpte; 38179861bb3eSSuraj Jitindar Singh vhc->get_patbe = spapr_get_patbe; 38181ec26c75SGreg Kurz vhc->encode_hpt_for_kvm_pr = spapr_encode_hpt_for_kvm_pr; 38197844e12bSCédric Le Goater xic->ics_get = spapr_ics_get; 38207844e12bSCédric Le Goater xic->ics_resend = spapr_ics_resend; 3821b2fc59aaSCédric Le Goater xic->icp_get = spapr_icp_get; 38226449da45SCédric Le Goater ispc->print_info = spapr_pic_print_info; 382355641213SLaurent Vivier /* Force NUMA node memory size to be a multiple of 382455641213SLaurent Vivier * SPAPR_MEMORY_BLOCK_SIZE (256M) since that's the granularity 382555641213SLaurent Vivier * in which LMBs are represented and hot-added 382655641213SLaurent Vivier */ 382755641213SLaurent Vivier mc->numa_mem_align_shift = 28; 382833face6bSDavid Gibson 382933face6bSDavid Gibson smc->default_caps = spapr_caps(0); 383033face6bSDavid Gibson spapr_caps_add_properties(smc, &error_abort); 383153018216SPaolo Bonzini } 383253018216SPaolo Bonzini 383329ee3247SAlexey Kardashevskiy static const TypeInfo spapr_machine_info = { 383429ee3247SAlexey Kardashevskiy .name = TYPE_SPAPR_MACHINE, 383529ee3247SAlexey Kardashevskiy .parent = TYPE_MACHINE, 38364aee7362SDavid Gibson .abstract = true, 38376ca1502eSAlexey Kardashevskiy .instance_size = sizeof(sPAPRMachineState), 3838bcb5ce08SDavid Gibson .instance_init = spapr_instance_init, 383987bbdd9cSDavid Gibson .instance_finalize = spapr_machine_finalizefn, 3840183930c0SDavid Gibson .class_size = sizeof(sPAPRMachineClass), 384129ee3247SAlexey Kardashevskiy .class_init = spapr_machine_class_init, 384271461b0fSAlexey Kardashevskiy .interfaces = (InterfaceInfo[]) 
{ 384371461b0fSAlexey Kardashevskiy { TYPE_FW_PATH_PROVIDER }, 384434316482SAlexey Kardashevskiy { TYPE_NMI }, 3845c20d332aSBharata B Rao { TYPE_HOTPLUG_HANDLER }, 38461d1be34dSDavid Gibson { TYPE_PPC_VIRTUAL_HYPERVISOR }, 38477844e12bSCédric Le Goater { TYPE_XICS_FABRIC }, 38486449da45SCédric Le Goater { TYPE_INTERRUPT_STATS_PROVIDER }, 384971461b0fSAlexey Kardashevskiy { } 385071461b0fSAlexey Kardashevskiy }, 385129ee3247SAlexey Kardashevskiy }; 385229ee3247SAlexey Kardashevskiy 3853fccbc785SDavid Gibson #define DEFINE_SPAPR_MACHINE(suffix, verstr, latest) \ 38545013c547SDavid Gibson static void spapr_machine_##suffix##_class_init(ObjectClass *oc, \ 38555013c547SDavid Gibson void *data) \ 38565013c547SDavid Gibson { \ 38575013c547SDavid Gibson MachineClass *mc = MACHINE_CLASS(oc); \ 38585013c547SDavid Gibson spapr_machine_##suffix##_class_options(mc); \ 3859fccbc785SDavid Gibson if (latest) { \ 3860fccbc785SDavid Gibson mc->alias = "pseries"; \ 3861fccbc785SDavid Gibson mc->is_default = 1; \ 3862fccbc785SDavid Gibson } \ 38635013c547SDavid Gibson } \ 38645013c547SDavid Gibson static void spapr_machine_##suffix##_instance_init(Object *obj) \ 38655013c547SDavid Gibson { \ 38665013c547SDavid Gibson MachineState *machine = MACHINE(obj); \ 38675013c547SDavid Gibson spapr_machine_##suffix##_instance_options(machine); \ 38685013c547SDavid Gibson } \ 38695013c547SDavid Gibson static const TypeInfo spapr_machine_##suffix##_info = { \ 38705013c547SDavid Gibson .name = MACHINE_TYPE_NAME("pseries-" verstr), \ 38715013c547SDavid Gibson .parent = TYPE_SPAPR_MACHINE, \ 38725013c547SDavid Gibson .class_init = spapr_machine_##suffix##_class_init, \ 38735013c547SDavid Gibson .instance_init = spapr_machine_##suffix##_instance_init, \ 38745013c547SDavid Gibson }; \ 38755013c547SDavid Gibson static void spapr_machine_register_##suffix(void) \ 38765013c547SDavid Gibson { \ 38775013c547SDavid Gibson type_register(&spapr_machine_##suffix##_info); \ 38785013c547SDavid Gibson } \ 
38790e6aac87SEduardo Habkost type_init(spapr_machine_register_##suffix) 38805013c547SDavid Gibson 38811c5f29bbSDavid Gibson /* 38822b615412SDavid Gibson * pseries-2.12 3883e2676b16SGreg Kurz */ 38842b615412SDavid Gibson static void spapr_machine_2_12_instance_options(MachineState *machine) 3885e2676b16SGreg Kurz { 3886e2676b16SGreg Kurz } 3887e2676b16SGreg Kurz 38882b615412SDavid Gibson static void spapr_machine_2_12_class_options(MachineClass *mc) 3889e2676b16SGreg Kurz { 3890e2676b16SGreg Kurz /* Defaults for the latest behaviour inherited from the base class */ 3891e2676b16SGreg Kurz } 3892e2676b16SGreg Kurz 38932b615412SDavid Gibson DEFINE_SPAPR_MACHINE(2_12, "2.12", true); 38942b615412SDavid Gibson 38952b615412SDavid Gibson /* 38962b615412SDavid Gibson * pseries-2.11 38972b615412SDavid Gibson */ 38982b615412SDavid Gibson #define SPAPR_COMPAT_2_11 \ 38992b615412SDavid Gibson HW_COMPAT_2_11 39002b615412SDavid Gibson 39012b615412SDavid Gibson static void spapr_machine_2_11_instance_options(MachineState *machine) 39022b615412SDavid Gibson { 39032b615412SDavid Gibson spapr_machine_2_12_instance_options(machine); 39042b615412SDavid Gibson } 39052b615412SDavid Gibson 39062b615412SDavid Gibson static void spapr_machine_2_11_class_options(MachineClass *mc) 39072b615412SDavid Gibson { 3908*ee76a09fSDavid Gibson sPAPRMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 3909*ee76a09fSDavid Gibson 39102b615412SDavid Gibson spapr_machine_2_12_class_options(mc); 3911*ee76a09fSDavid Gibson smc->default_caps = spapr_caps(SPAPR_CAP_HTM); 39122b615412SDavid Gibson SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_11); 39132b615412SDavid Gibson } 39142b615412SDavid Gibson 39152b615412SDavid Gibson DEFINE_SPAPR_MACHINE(2_11, "2.11", false); 3916e2676b16SGreg Kurz 3917e2676b16SGreg Kurz /* 39183fa14fbeSDavid Gibson * pseries-2.10 3919db800b21SDavid Gibson */ 3920e2676b16SGreg Kurz #define SPAPR_COMPAT_2_10 \ 39212b615412SDavid Gibson HW_COMPAT_2_10 3922e2676b16SGreg Kurz 39233fa14fbeSDavid Gibson 
static void spapr_machine_2_10_instance_options(MachineState *machine) 3924db800b21SDavid Gibson { 39252b615412SDavid Gibson spapr_machine_2_11_instance_options(machine); 3926db800b21SDavid Gibson } 3927db800b21SDavid Gibson 39283fa14fbeSDavid Gibson static void spapr_machine_2_10_class_options(MachineClass *mc) 3929db800b21SDavid Gibson { 3930e2676b16SGreg Kurz spapr_machine_2_11_class_options(mc); 3931e2676b16SGreg Kurz SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_10); 3932db800b21SDavid Gibson } 3933db800b21SDavid Gibson 3934e2676b16SGreg Kurz DEFINE_SPAPR_MACHINE(2_10, "2.10", false); 39353fa14fbeSDavid Gibson 39363fa14fbeSDavid Gibson /* 39373fa14fbeSDavid Gibson * pseries-2.9 39383fa14fbeSDavid Gibson */ 39393fa14fbeSDavid Gibson #define SPAPR_COMPAT_2_9 \ 3940d5fc133eSDavid Gibson HW_COMPAT_2_9 \ 3941d5fc133eSDavid Gibson { \ 3942d5fc133eSDavid Gibson .driver = TYPE_POWERPC_CPU, \ 3943d5fc133eSDavid Gibson .property = "pre-2.10-migration", \ 3944d5fc133eSDavid Gibson .value = "on", \ 3945d5fc133eSDavid Gibson }, \ 39463fa14fbeSDavid Gibson 39473fa14fbeSDavid Gibson static void spapr_machine_2_9_instance_options(MachineState *machine) 39483fa14fbeSDavid Gibson { 39493fa14fbeSDavid Gibson spapr_machine_2_10_instance_options(machine); 39503fa14fbeSDavid Gibson } 39513fa14fbeSDavid Gibson 39523fa14fbeSDavid Gibson static void spapr_machine_2_9_class_options(MachineClass *mc) 39533fa14fbeSDavid Gibson { 395446f7afa3SGreg Kurz sPAPRMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 395546f7afa3SGreg Kurz 39563fa14fbeSDavid Gibson spapr_machine_2_10_class_options(mc); 39573fa14fbeSDavid Gibson SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_9); 39583bfe5716SLaurent Vivier mc->numa_auto_assign_ram = numa_legacy_auto_assign_ram; 395946f7afa3SGreg Kurz smc->pre_2_10_has_unused_icps = true; 396052b81ab5SDavid Gibson smc->resize_hpt_default = SPAPR_RESIZE_HPT_DISABLED; 39613fa14fbeSDavid Gibson } 39623fa14fbeSDavid Gibson 39633fa14fbeSDavid Gibson DEFINE_SPAPR_MACHINE(2_9, "2.9", false); 
3964fa325e6cSDavid Gibson 3965fa325e6cSDavid Gibson /* 3966fa325e6cSDavid Gibson * pseries-2.8 3967fa325e6cSDavid Gibson */ 3968fa325e6cSDavid Gibson #define SPAPR_COMPAT_2_8 \ 396982516263SDavid Gibson HW_COMPAT_2_8 \ 397082516263SDavid Gibson { \ 397182516263SDavid Gibson .driver = TYPE_SPAPR_PCI_HOST_BRIDGE, \ 397282516263SDavid Gibson .property = "pcie-extended-configuration-space", \ 397382516263SDavid Gibson .value = "off", \ 397482516263SDavid Gibson }, 3975fa325e6cSDavid Gibson 3976fa325e6cSDavid Gibson static void spapr_machine_2_8_instance_options(MachineState *machine) 3977fa325e6cSDavid Gibson { 3978fa325e6cSDavid Gibson spapr_machine_2_9_instance_options(machine); 3979fa325e6cSDavid Gibson } 3980fa325e6cSDavid Gibson 3981fa325e6cSDavid Gibson static void spapr_machine_2_8_class_options(MachineClass *mc) 3982fa325e6cSDavid Gibson { 3983fa325e6cSDavid Gibson spapr_machine_2_9_class_options(mc); 3984fa325e6cSDavid Gibson SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_8); 398555641213SLaurent Vivier mc->numa_mem_align_shift = 23; 3986fa325e6cSDavid Gibson } 3987fa325e6cSDavid Gibson 3988fa325e6cSDavid Gibson DEFINE_SPAPR_MACHINE(2_8, "2.8", false); 3989db800b21SDavid Gibson 3990db800b21SDavid Gibson /* 39911ea1eefcSBharata B Rao * pseries-2.7 39921ea1eefcSBharata B Rao */ 3993db800b21SDavid Gibson #define SPAPR_COMPAT_2_7 \ 3994db800b21SDavid Gibson HW_COMPAT_2_7 \ 3995357d1e3bSDavid Gibson { \ 3996357d1e3bSDavid Gibson .driver = TYPE_SPAPR_PCI_HOST_BRIDGE, \ 3997357d1e3bSDavid Gibson .property = "mem_win_size", \ 3998357d1e3bSDavid Gibson .value = stringify(SPAPR_PCI_2_7_MMIO_WIN_SIZE),\ 3999357d1e3bSDavid Gibson }, \ 4000357d1e3bSDavid Gibson { \ 4001357d1e3bSDavid Gibson .driver = TYPE_SPAPR_PCI_HOST_BRIDGE, \ 4002357d1e3bSDavid Gibson .property = "mem64_win_size", \ 4003357d1e3bSDavid Gibson .value = "0", \ 4004146c11f1SDavid Gibson }, \ 4005146c11f1SDavid Gibson { \ 4006146c11f1SDavid Gibson .driver = TYPE_POWERPC_CPU, \ 4007146c11f1SDavid Gibson .property = 
"pre-2.8-migration", \ 4008146c11f1SDavid Gibson .value = "on", \ 40095c4537bdSDavid Gibson }, \ 40105c4537bdSDavid Gibson { \ 40115c4537bdSDavid Gibson .driver = TYPE_SPAPR_PCI_HOST_BRIDGE, \ 40125c4537bdSDavid Gibson .property = "pre-2.8-migration", \ 40135c4537bdSDavid Gibson .value = "on", \ 4014357d1e3bSDavid Gibson }, 4015357d1e3bSDavid Gibson 4016357d1e3bSDavid Gibson static void phb_placement_2_7(sPAPRMachineState *spapr, uint32_t index, 4017357d1e3bSDavid Gibson uint64_t *buid, hwaddr *pio, 4018357d1e3bSDavid Gibson hwaddr *mmio32, hwaddr *mmio64, 4019357d1e3bSDavid Gibson unsigned n_dma, uint32_t *liobns, Error **errp) 4020357d1e3bSDavid Gibson { 4021357d1e3bSDavid Gibson /* Legacy PHB placement for pseries-2.7 and earlier machine types */ 4022357d1e3bSDavid Gibson const uint64_t base_buid = 0x800000020000000ULL; 4023357d1e3bSDavid Gibson const hwaddr phb_spacing = 0x1000000000ULL; /* 64 GiB */ 4024357d1e3bSDavid Gibson const hwaddr mmio_offset = 0xa0000000; /* 2 GiB + 512 MiB */ 4025357d1e3bSDavid Gibson const hwaddr pio_offset = 0x80000000; /* 2 GiB */ 4026357d1e3bSDavid Gibson const uint32_t max_index = 255; 4027357d1e3bSDavid Gibson const hwaddr phb0_alignment = 0x10000000000ULL; /* 1 TiB */ 4028357d1e3bSDavid Gibson 4029357d1e3bSDavid Gibson uint64_t ram_top = MACHINE(spapr)->ram_size; 4030357d1e3bSDavid Gibson hwaddr phb0_base, phb_base; 4031357d1e3bSDavid Gibson int i; 4032357d1e3bSDavid Gibson 4033357d1e3bSDavid Gibson /* Do we have hotpluggable memory? 
*/ 4034357d1e3bSDavid Gibson if (MACHINE(spapr)->maxram_size > ram_top) { 4035357d1e3bSDavid Gibson /* Can't just use maxram_size, because there may be an 4036357d1e3bSDavid Gibson * alignment gap between normal and hotpluggable memory 4037357d1e3bSDavid Gibson * regions */ 4038357d1e3bSDavid Gibson ram_top = spapr->hotplug_memory.base + 4039357d1e3bSDavid Gibson memory_region_size(&spapr->hotplug_memory.mr); 4040357d1e3bSDavid Gibson } 4041357d1e3bSDavid Gibson 4042357d1e3bSDavid Gibson phb0_base = QEMU_ALIGN_UP(ram_top, phb0_alignment); 4043357d1e3bSDavid Gibson 4044357d1e3bSDavid Gibson if (index > max_index) { 4045357d1e3bSDavid Gibson error_setg(errp, "\"index\" for PAPR PHB is too large (max %u)", 4046357d1e3bSDavid Gibson max_index); 4047357d1e3bSDavid Gibson return; 4048357d1e3bSDavid Gibson } 4049357d1e3bSDavid Gibson 4050357d1e3bSDavid Gibson *buid = base_buid + index; 4051357d1e3bSDavid Gibson for (i = 0; i < n_dma; ++i) { 4052357d1e3bSDavid Gibson liobns[i] = SPAPR_PCI_LIOBN(index, i); 4053357d1e3bSDavid Gibson } 4054357d1e3bSDavid Gibson 4055357d1e3bSDavid Gibson phb_base = phb0_base + index * phb_spacing; 4056357d1e3bSDavid Gibson *pio = phb_base + pio_offset; 4057357d1e3bSDavid Gibson *mmio32 = phb_base + mmio_offset; 4058357d1e3bSDavid Gibson /* 4059357d1e3bSDavid Gibson * We don't set the 64-bit MMIO window, relying on the PHB's 4060357d1e3bSDavid Gibson * fallback behaviour of automatically splitting a large "32-bit" 4061357d1e3bSDavid Gibson * window into contiguous 32-bit and 64-bit windows 4062357d1e3bSDavid Gibson */ 4063357d1e3bSDavid Gibson } 4064db800b21SDavid Gibson 40651ea1eefcSBharata B Rao static void spapr_machine_2_7_instance_options(MachineState *machine) 40661ea1eefcSBharata B Rao { 4067f6229214SMichael Roth sPAPRMachineState *spapr = SPAPR_MACHINE(machine); 4068f6229214SMichael Roth 4069672de881SMichael Roth spapr_machine_2_8_instance_options(machine); 4070f6229214SMichael Roth spapr->use_hotplug_event_source = false; 
40711ea1eefcSBharata B Rao } 40721ea1eefcSBharata B Rao 40731ea1eefcSBharata B Rao static void spapr_machine_2_7_class_options(MachineClass *mc) 40741ea1eefcSBharata B Rao { 40753daa4a9fSThomas Huth sPAPRMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 40763daa4a9fSThomas Huth 4077db800b21SDavid Gibson spapr_machine_2_8_class_options(mc); 40782e9c10ebSIgor Mammedov mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power7_v2.3"); 4079db800b21SDavid Gibson SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_7); 4080357d1e3bSDavid Gibson smc->phb_placement = phb_placement_2_7; 40811ea1eefcSBharata B Rao } 40821ea1eefcSBharata B Rao 4083db800b21SDavid Gibson DEFINE_SPAPR_MACHINE(2_7, "2.7", false); 40841ea1eefcSBharata B Rao 40851ea1eefcSBharata B Rao /* 40864b23699cSDavid Gibson * pseries-2.6 40874b23699cSDavid Gibson */ 40881ea1eefcSBharata B Rao #define SPAPR_COMPAT_2_6 \ 4089ae4de14cSAlexey Kardashevskiy HW_COMPAT_2_6 \ 4090ae4de14cSAlexey Kardashevskiy { \ 4091ae4de14cSAlexey Kardashevskiy .driver = TYPE_SPAPR_PCI_HOST_BRIDGE,\ 4092ae4de14cSAlexey Kardashevskiy .property = "ddw",\ 4093ae4de14cSAlexey Kardashevskiy .value = stringify(off),\ 4094ae4de14cSAlexey Kardashevskiy }, 40951ea1eefcSBharata B Rao 40964b23699cSDavid Gibson static void spapr_machine_2_6_instance_options(MachineState *machine) 40974b23699cSDavid Gibson { 4098672de881SMichael Roth spapr_machine_2_7_instance_options(machine); 40994b23699cSDavid Gibson } 41004b23699cSDavid Gibson 41014b23699cSDavid Gibson static void spapr_machine_2_6_class_options(MachineClass *mc) 41024b23699cSDavid Gibson { 41031ea1eefcSBharata B Rao spapr_machine_2_7_class_options(mc); 4104c5514d0eSIgor Mammedov mc->has_hotpluggable_cpus = false; 41051ea1eefcSBharata B Rao SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_6); 41064b23699cSDavid Gibson } 41074b23699cSDavid Gibson 41081ea1eefcSBharata B Rao DEFINE_SPAPR_MACHINE(2_6, "2.6", false); 41094b23699cSDavid Gibson 41104b23699cSDavid Gibson /* 41111c5f29bbSDavid Gibson * pseries-2.5 41121c5f29bbSDavid 
Gibson */ 41134b23699cSDavid Gibson #define SPAPR_COMPAT_2_5 \ 411457c522f4SThomas Huth HW_COMPAT_2_5 \ 411557c522f4SThomas Huth { \ 411657c522f4SThomas Huth .driver = "spapr-vlan", \ 411757c522f4SThomas Huth .property = "use-rx-buffer-pools", \ 411857c522f4SThomas Huth .value = "off", \ 411957c522f4SThomas Huth }, 41204b23699cSDavid Gibson 41215013c547SDavid Gibson static void spapr_machine_2_5_instance_options(MachineState *machine) 41221c5f29bbSDavid Gibson { 4123672de881SMichael Roth spapr_machine_2_6_instance_options(machine); 41245013c547SDavid Gibson } 41255013c547SDavid Gibson 41265013c547SDavid Gibson static void spapr_machine_2_5_class_options(MachineClass *mc) 41275013c547SDavid Gibson { 412857040d45SThomas Huth sPAPRMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 412957040d45SThomas Huth 41304b23699cSDavid Gibson spapr_machine_2_6_class_options(mc); 413157040d45SThomas Huth smc->use_ohci_by_default = true; 41324b23699cSDavid Gibson SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_5); 41331c5f29bbSDavid Gibson } 41341c5f29bbSDavid Gibson 41354b23699cSDavid Gibson DEFINE_SPAPR_MACHINE(2_5, "2.5", false); 41361c5f29bbSDavid Gibson 41371c5f29bbSDavid Gibson /* 41381c5f29bbSDavid Gibson * pseries-2.4 41391c5f29bbSDavid Gibson */ 414080fd50f9SCornelia Huck #define SPAPR_COMPAT_2_4 \ 414180fd50f9SCornelia Huck HW_COMPAT_2_4 414280fd50f9SCornelia Huck 41435013c547SDavid Gibson static void spapr_machine_2_4_instance_options(MachineState *machine) 41441c5f29bbSDavid Gibson { 41455013c547SDavid Gibson spapr_machine_2_5_instance_options(machine); 41465013c547SDavid Gibson } 41471c5f29bbSDavid Gibson 41485013c547SDavid Gibson static void spapr_machine_2_4_class_options(MachineClass *mc) 41495013c547SDavid Gibson { 4150fc9f38c3SDavid Gibson sPAPRMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 4151fc9f38c3SDavid Gibson 4152fc9f38c3SDavid Gibson spapr_machine_2_5_class_options(mc); 4153fc9f38c3SDavid Gibson smc->dr_lmb_enabled = false; 4154f949b4e5SDavid Gibson SET_MACHINE_COMPAT(mc, 
SPAPR_COMPAT_2_4); 41551c5f29bbSDavid Gibson } 41561c5f29bbSDavid Gibson 4157fccbc785SDavid Gibson DEFINE_SPAPR_MACHINE(2_4, "2.4", false); 41581c5f29bbSDavid Gibson 41591c5f29bbSDavid Gibson /* 41601c5f29bbSDavid Gibson * pseries-2.3 41611c5f29bbSDavid Gibson */ 416238ff32c6SEduardo Habkost #define SPAPR_COMPAT_2_3 \ 41637619c7b0SMichael Roth HW_COMPAT_2_3 \ 41647619c7b0SMichael Roth {\ 41657619c7b0SMichael Roth .driver = "spapr-pci-host-bridge",\ 41667619c7b0SMichael Roth .property = "dynamic-reconfiguration",\ 41677619c7b0SMichael Roth .value = "off",\ 41687619c7b0SMichael Roth }, 416938ff32c6SEduardo Habkost 41705013c547SDavid Gibson static void spapr_machine_2_3_instance_options(MachineState *machine) 41711c5f29bbSDavid Gibson { 41725013c547SDavid Gibson spapr_machine_2_4_instance_options(machine); 41731c5f29bbSDavid Gibson } 41741c5f29bbSDavid Gibson 41755013c547SDavid Gibson static void spapr_machine_2_3_class_options(MachineClass *mc) 41761c5f29bbSDavid Gibson { 4177fc9f38c3SDavid Gibson spapr_machine_2_4_class_options(mc); 4178f949b4e5SDavid Gibson SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_3); 41791c5f29bbSDavid Gibson } 4180fccbc785SDavid Gibson DEFINE_SPAPR_MACHINE(2_3, "2.3", false); 41811c5f29bbSDavid Gibson 41821c5f29bbSDavid Gibson /* 41831c5f29bbSDavid Gibson * pseries-2.2 41841c5f29bbSDavid Gibson */ 41851c5f29bbSDavid Gibson 4186b194df47SAlexey Kardashevskiy #define SPAPR_COMPAT_2_2 \ 41874dfd8eaaSEduardo Habkost HW_COMPAT_2_2 \ 4188b194df47SAlexey Kardashevskiy {\ 4189b194df47SAlexey Kardashevskiy .driver = TYPE_SPAPR_PCI_HOST_BRIDGE,\ 4190b194df47SAlexey Kardashevskiy .property = "mem_win_size",\ 4191b194df47SAlexey Kardashevskiy .value = "0x20000000",\ 4192dd754bafSEduardo Habkost }, 4193b194df47SAlexey Kardashevskiy 41945013c547SDavid Gibson static void spapr_machine_2_2_instance_options(MachineState *machine) 4195b0e966d0SJason Wang { 41965013c547SDavid Gibson spapr_machine_2_3_instance_options(machine); 4197cba0e779SGreg Kurz 
machine->suppress_vmdesc = true; 4198b0e966d0SJason Wang } 4199b0e966d0SJason Wang 42005013c547SDavid Gibson static void spapr_machine_2_2_class_options(MachineClass *mc) 4201b0e966d0SJason Wang { 4202fc9f38c3SDavid Gibson spapr_machine_2_3_class_options(mc); 4203f949b4e5SDavid Gibson SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_2); 42041c5f29bbSDavid Gibson } 4205fccbc785SDavid Gibson DEFINE_SPAPR_MACHINE(2_2, "2.2", false); 42061c5f29bbSDavid Gibson 42071c5f29bbSDavid Gibson /* 42081c5f29bbSDavid Gibson * pseries-2.1 42091c5f29bbSDavid Gibson */ 42101c5f29bbSDavid Gibson #define SPAPR_COMPAT_2_1 \ 42111c5f29bbSDavid Gibson HW_COMPAT_2_1 42121c5f29bbSDavid Gibson 42135013c547SDavid Gibson static void spapr_machine_2_1_instance_options(MachineState *machine) 42141c5f29bbSDavid Gibson { 42155013c547SDavid Gibson spapr_machine_2_2_instance_options(machine); 42161c5f29bbSDavid Gibson } 42171c5f29bbSDavid Gibson 42185013c547SDavid Gibson static void spapr_machine_2_1_class_options(MachineClass *mc) 4219b0e966d0SJason Wang { 4220fc9f38c3SDavid Gibson spapr_machine_2_2_class_options(mc); 4221f949b4e5SDavid Gibson SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_1); 42226026db45SAlexey Kardashevskiy } 4223fccbc785SDavid Gibson DEFINE_SPAPR_MACHINE(2_1, "2.1", false); 42246026db45SAlexey Kardashevskiy 422529ee3247SAlexey Kardashevskiy static void spapr_machine_register_types(void) 422629ee3247SAlexey Kardashevskiy { 422729ee3247SAlexey Kardashevskiy type_register_static(&spapr_machine_info); 422829ee3247SAlexey Kardashevskiy } 422929ee3247SAlexey Kardashevskiy 423029ee3247SAlexey Kardashevskiy type_init(spapr_machine_register_types) 4231