153018216SPaolo Bonzini /* 253018216SPaolo Bonzini * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator 353018216SPaolo Bonzini * 453018216SPaolo Bonzini * Copyright (c) 2004-2007 Fabrice Bellard 553018216SPaolo Bonzini * Copyright (c) 2007 Jocelyn Mayer 653018216SPaolo Bonzini * Copyright (c) 2010 David Gibson, IBM Corporation. 753018216SPaolo Bonzini * 853018216SPaolo Bonzini * Permission is hereby granted, free of charge, to any person obtaining a copy 953018216SPaolo Bonzini * of this software and associated documentation files (the "Software"), to deal 1053018216SPaolo Bonzini * in the Software without restriction, including without limitation the rights 1153018216SPaolo Bonzini * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 1253018216SPaolo Bonzini * copies of the Software, and to permit persons to whom the Software is 1353018216SPaolo Bonzini * furnished to do so, subject to the following conditions: 1453018216SPaolo Bonzini * 1553018216SPaolo Bonzini * The above copyright notice and this permission notice shall be included in 1653018216SPaolo Bonzini * all copies or substantial portions of the Software. 1753018216SPaolo Bonzini * 1853018216SPaolo Bonzini * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 1953018216SPaolo Bonzini * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 2053018216SPaolo Bonzini * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 2153018216SPaolo Bonzini * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 2253018216SPaolo Bonzini * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 2353018216SPaolo Bonzini * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 2453018216SPaolo Bonzini * THE SOFTWARE. 
2553018216SPaolo Bonzini */ 26a8d25326SMarkus Armbruster 270d75590dSPeter Maydell #include "qemu/osdep.h" 28a8d25326SMarkus Armbruster #include "qemu-common.h" 29da34e65cSMarkus Armbruster #include "qapi/error.h" 30fa98fbfcSSam Bobroff #include "qapi/visitor.h" 3153018216SPaolo Bonzini #include "sysemu/sysemu.h" 32b58c5c2dSMarkus Armbruster #include "sysemu/hostmem.h" 33e35704baSEduardo Habkost #include "sysemu/numa.h" 3423ff81bdSGreg Kurz #include "sysemu/qtest.h" 3571e8a915SMarkus Armbruster #include "sysemu/reset.h" 3654d31236SMarkus Armbruster #include "sysemu/runstate.h" 3703dd024fSPaolo Bonzini #include "qemu/log.h" 3871461b0fSAlexey Kardashevskiy #include "hw/fw-path-provider.h" 3953018216SPaolo Bonzini #include "elf.h" 4053018216SPaolo Bonzini #include "net/net.h" 41ad440b4aSAndrew Jones #include "sysemu/device_tree.h" 4253018216SPaolo Bonzini #include "sysemu/cpus.h" 43b3946626SVincent Palatin #include "sysemu/hw_accel.h" 4453018216SPaolo Bonzini #include "kvm_ppc.h" 45c4b63b7cSJuan Quintela #include "migration/misc.h" 46ca77ee28SMarkus Armbruster #include "migration/qemu-file-types.h" 4784a899deSJuan Quintela #include "migration/global_state.h" 48f2a8f0a6SJuan Quintela #include "migration/register.h" 494be21d56SDavid Gibson #include "mmu-hash64.h" 50b4db5413SSuraj Jitindar Singh #include "mmu-book3s-v3.h" 517abd43baSSuraj Jitindar Singh #include "cpu-models.h" 522e5b09fdSMarkus Armbruster #include "hw/core/cpu.h" 5353018216SPaolo Bonzini 5453018216SPaolo Bonzini #include "hw/boards.h" 550d09e41aSPaolo Bonzini #include "hw/ppc/ppc.h" 5653018216SPaolo Bonzini #include "hw/loader.h" 5753018216SPaolo Bonzini 587804c353SCédric Le Goater #include "hw/ppc/fdt.h" 590d09e41aSPaolo Bonzini #include "hw/ppc/spapr.h" 600d09e41aSPaolo Bonzini #include "hw/ppc/spapr_vio.h" 61a27bd6c7SMarkus Armbruster #include "hw/qdev-properties.h" 620d09e41aSPaolo Bonzini #include "hw/pci-host/spapr.h" 6353018216SPaolo Bonzini #include "hw/pci/msi.h" 6453018216SPaolo Bonzini 
6553018216SPaolo Bonzini #include "hw/pci/pci.h" 6671461b0fSAlexey Kardashevskiy #include "hw/scsi/scsi.h" 6771461b0fSAlexey Kardashevskiy #include "hw/virtio/virtio-scsi.h" 68c4e13492SFelipe Franciosi #include "hw/virtio/vhost-scsi-common.h" 6953018216SPaolo Bonzini 7053018216SPaolo Bonzini #include "exec/address-spaces.h" 712309832aSDavid Gibson #include "exec/ram_addr.h" 7253018216SPaolo Bonzini #include "hw/usb.h" 7353018216SPaolo Bonzini #include "qemu/config-file.h" 74135a129aSAneesh Kumar K.V #include "qemu/error-report.h" 752a6593cbSAlexey Kardashevskiy #include "trace.h" 7634316482SAlexey Kardashevskiy #include "hw/nmi.h" 776449da45SCédric Le Goater #include "hw/intc/intc.h" 7853018216SPaolo Bonzini 7994a94e4cSBharata B Rao #include "hw/ppc/spapr_cpu_core.h" 802cc0e2e8SDavid Hildenbrand #include "hw/mem/memory-device.h" 810fb6bd07SMichael Roth #include "hw/ppc/spapr_tpm_proxy.h" 8268a27b20SMichael S. Tsirkin 83f041d6afSGreg Kurz #include "monitor/monitor.h" 84f041d6afSGreg Kurz 8553018216SPaolo Bonzini #include <libfdt.h> 8653018216SPaolo Bonzini 8753018216SPaolo Bonzini /* SLOF memory layout: 8853018216SPaolo Bonzini * 8953018216SPaolo Bonzini * SLOF raw image loaded at 0, copies its romfs right below the flat 9053018216SPaolo Bonzini * device-tree, then position SLOF itself 31M below that 9153018216SPaolo Bonzini * 9253018216SPaolo Bonzini * So we set FW_OVERHEAD to 40MB which should account for all of that 9353018216SPaolo Bonzini * and more 9453018216SPaolo Bonzini * 9553018216SPaolo Bonzini * We load our kernel at 4M, leaving space for SLOF initial image 9653018216SPaolo Bonzini */ 9738b02bd8SAlexey Kardashevskiy #define FDT_MAX_SIZE 0x100000 98b7d1f77aSBenjamin Herrenschmidt #define RTAS_MAX_ADDR 0x80000000 /* RTAS must stay below that */ 9953018216SPaolo Bonzini #define FW_MAX_SIZE 0x400000 10053018216SPaolo Bonzini #define FW_FILE_NAME "slof.bin" 10153018216SPaolo Bonzini #define FW_OVERHEAD 0x2800000 10253018216SPaolo Bonzini #define 
KERNEL_LOAD_ADDR FW_MAX_SIZE 10353018216SPaolo Bonzini 10453018216SPaolo Bonzini #define MIN_RMA_SLOF 128UL 10553018216SPaolo Bonzini 1065c7adcf4SGreg Kurz #define PHANDLE_INTC 0x00001111 10753018216SPaolo Bonzini 1085d0fb150SGreg Kurz /* These two functions implement the VCPU id numbering: one to compute them 1095d0fb150SGreg Kurz * all and one to identify thread 0 of a VCORE. Any change to the first one 1105d0fb150SGreg Kurz * is likely to have an impact on the second one, so let's keep them close. 1115d0fb150SGreg Kurz */ 112ce2918cbSDavid Gibson static int spapr_vcpu_id(SpaprMachineState *spapr, int cpu_index) 1135d0fb150SGreg Kurz { 114fe6b6346SLike Xu MachineState *ms = MACHINE(spapr); 115fe6b6346SLike Xu unsigned int smp_threads = ms->smp.threads; 116fe6b6346SLike Xu 1171a5008fcSGreg Kurz assert(spapr->vsmt); 1185d0fb150SGreg Kurz return 1195d0fb150SGreg Kurz (cpu_index / smp_threads) * spapr->vsmt + cpu_index % smp_threads; 1205d0fb150SGreg Kurz } 121ce2918cbSDavid Gibson static bool spapr_is_thread0_in_vcore(SpaprMachineState *spapr, 1225d0fb150SGreg Kurz PowerPCCPU *cpu) 1235d0fb150SGreg Kurz { 1241a5008fcSGreg Kurz assert(spapr->vsmt); 1255d0fb150SGreg Kurz return spapr_get_vcpu_id(cpu) % spapr->vsmt == 0; 1265d0fb150SGreg Kurz } 1275d0fb150SGreg Kurz 12846f7afa3SGreg Kurz static bool pre_2_10_vmstate_dummy_icp_needed(void *opaque) 12946f7afa3SGreg Kurz { 13046f7afa3SGreg Kurz /* Dummy entries correspond to unused ICPState objects in older QEMUs, 13146f7afa3SGreg Kurz * and newer QEMUs don't even have them. In both cases, we don't want 13246f7afa3SGreg Kurz * to send anything on the wire. 
13346f7afa3SGreg Kurz */ 13446f7afa3SGreg Kurz return false; 13546f7afa3SGreg Kurz } 13646f7afa3SGreg Kurz 13746f7afa3SGreg Kurz static const VMStateDescription pre_2_10_vmstate_dummy_icp = { 13846f7afa3SGreg Kurz .name = "icp/server", 13946f7afa3SGreg Kurz .version_id = 1, 14046f7afa3SGreg Kurz .minimum_version_id = 1, 14146f7afa3SGreg Kurz .needed = pre_2_10_vmstate_dummy_icp_needed, 14246f7afa3SGreg Kurz .fields = (VMStateField[]) { 14346f7afa3SGreg Kurz VMSTATE_UNUSED(4), /* uint32_t xirr */ 14446f7afa3SGreg Kurz VMSTATE_UNUSED(1), /* uint8_t pending_priority */ 14546f7afa3SGreg Kurz VMSTATE_UNUSED(1), /* uint8_t mfrr */ 14646f7afa3SGreg Kurz VMSTATE_END_OF_LIST() 14746f7afa3SGreg Kurz }, 14846f7afa3SGreg Kurz }; 14946f7afa3SGreg Kurz 15046f7afa3SGreg Kurz static void pre_2_10_vmstate_register_dummy_icp(int i) 15146f7afa3SGreg Kurz { 15246f7afa3SGreg Kurz vmstate_register(NULL, i, &pre_2_10_vmstate_dummy_icp, 15346f7afa3SGreg Kurz (void *)(uintptr_t) i); 15446f7afa3SGreg Kurz } 15546f7afa3SGreg Kurz 15646f7afa3SGreg Kurz static void pre_2_10_vmstate_unregister_dummy_icp(int i) 15746f7afa3SGreg Kurz { 15846f7afa3SGreg Kurz vmstate_unregister(NULL, &pre_2_10_vmstate_dummy_icp, 15946f7afa3SGreg Kurz (void *)(uintptr_t) i); 16046f7afa3SGreg Kurz } 16146f7afa3SGreg Kurz 162ce2918cbSDavid Gibson int spapr_max_server_number(SpaprMachineState *spapr) 16346f7afa3SGreg Kurz { 164fe6b6346SLike Xu MachineState *ms = MACHINE(spapr); 165fe6b6346SLike Xu 1661a5008fcSGreg Kurz assert(spapr->vsmt); 167fe6b6346SLike Xu return DIV_ROUND_UP(ms->smp.max_cpus * spapr->vsmt, ms->smp.threads); 16846f7afa3SGreg Kurz } 16946f7afa3SGreg Kurz 170833d4668SAlexey Kardashevskiy static int spapr_fixup_cpu_smt_dt(void *fdt, int offset, PowerPCCPU *cpu, 171833d4668SAlexey Kardashevskiy int smt_threads) 172833d4668SAlexey Kardashevskiy { 173833d4668SAlexey Kardashevskiy int i, ret = 0; 174833d4668SAlexey Kardashevskiy uint32_t servers_prop[smt_threads]; 175833d4668SAlexey Kardashevskiy uint32_t 
gservers_prop[smt_threads * 2]; 17614bb4486SGreg Kurz int index = spapr_get_vcpu_id(cpu); 177833d4668SAlexey Kardashevskiy 178d6e166c0SDavid Gibson if (cpu->compat_pvr) { 179d6e166c0SDavid Gibson ret = fdt_setprop_cell(fdt, offset, "cpu-version", cpu->compat_pvr); 1806d9412eaSAlexey Kardashevskiy if (ret < 0) { 1816d9412eaSAlexey Kardashevskiy return ret; 1826d9412eaSAlexey Kardashevskiy } 1836d9412eaSAlexey Kardashevskiy } 1846d9412eaSAlexey Kardashevskiy 185833d4668SAlexey Kardashevskiy /* Build interrupt servers and gservers properties */ 186833d4668SAlexey Kardashevskiy for (i = 0; i < smt_threads; i++) { 187833d4668SAlexey Kardashevskiy servers_prop[i] = cpu_to_be32(index + i); 188833d4668SAlexey Kardashevskiy /* Hack, direct the group queues back to cpu 0 */ 189833d4668SAlexey Kardashevskiy gservers_prop[i*2] = cpu_to_be32(index + i); 190833d4668SAlexey Kardashevskiy gservers_prop[i*2 + 1] = 0; 191833d4668SAlexey Kardashevskiy } 192833d4668SAlexey Kardashevskiy ret = fdt_setprop(fdt, offset, "ibm,ppc-interrupt-server#s", 193833d4668SAlexey Kardashevskiy servers_prop, sizeof(servers_prop)); 194833d4668SAlexey Kardashevskiy if (ret < 0) { 195833d4668SAlexey Kardashevskiy return ret; 196833d4668SAlexey Kardashevskiy } 197833d4668SAlexey Kardashevskiy ret = fdt_setprop(fdt, offset, "ibm,ppc-interrupt-gserver#s", 198833d4668SAlexey Kardashevskiy gservers_prop, sizeof(gservers_prop)); 199833d4668SAlexey Kardashevskiy 200833d4668SAlexey Kardashevskiy return ret; 201833d4668SAlexey Kardashevskiy } 202833d4668SAlexey Kardashevskiy 20399861ecbSIgor Mammedov static int spapr_fixup_cpu_numa_dt(void *fdt, int offset, PowerPCCPU *cpu) 2040da6f3feSBharata B Rao { 20514bb4486SGreg Kurz int index = spapr_get_vcpu_id(cpu); 2060da6f3feSBharata B Rao uint32_t associativity[] = {cpu_to_be32(0x5), 2070da6f3feSBharata B Rao cpu_to_be32(0x0), 2080da6f3feSBharata B Rao cpu_to_be32(0x0), 2090da6f3feSBharata B Rao cpu_to_be32(0x0), 21015f8b142SIgor Mammedov cpu_to_be32(cpu->node_id), 
2110da6f3feSBharata B Rao cpu_to_be32(index)}; 2120da6f3feSBharata B Rao 2130da6f3feSBharata B Rao /* Advertise NUMA via ibm,associativity */ 21499861ecbSIgor Mammedov return fdt_setprop(fdt, offset, "ibm,associativity", associativity, 2150da6f3feSBharata B Rao sizeof(associativity)); 2160da6f3feSBharata B Rao } 2170da6f3feSBharata B Rao 21886d5771aSSam Bobroff /* Populate the "ibm,pa-features" property */ 219ce2918cbSDavid Gibson static void spapr_populate_pa_features(SpaprMachineState *spapr, 220ee76a09fSDavid Gibson PowerPCCPU *cpu, 221daa36379SDavid Gibson void *fdt, int offset) 22286d5771aSSam Bobroff { 22386d5771aSSam Bobroff uint8_t pa_features_206[] = { 6, 0, 22486d5771aSSam Bobroff 0xf6, 0x1f, 0xc7, 0x00, 0x80, 0xc0 }; 22586d5771aSSam Bobroff uint8_t pa_features_207[] = { 24, 0, 22686d5771aSSam Bobroff 0xf6, 0x1f, 0xc7, 0xc0, 0x80, 0xf0, 22786d5771aSSam Bobroff 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 22886d5771aSSam Bobroff 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 22986d5771aSSam Bobroff 0x80, 0x00, 0x80, 0x00, 0x00, 0x00 }; 2309fb4541fSSam Bobroff uint8_t pa_features_300[] = { 66, 0, 2319fb4541fSSam Bobroff /* 0: MMU|FPU|SLB|RUN|DABR|NX, 1: fri[nzpm]|DABRX|SPRG3|SLB0|PP110 */ 2329fb4541fSSam Bobroff /* 2: VPM|DS205|PPR|DS202|DS206, 3: LSD|URG, SSO, 5: LE|CFAR|EB|LSQ */ 23386d5771aSSam Bobroff 0xf6, 0x1f, 0xc7, 0xc0, 0x80, 0xf0, /* 0 - 5 */ 2349fb4541fSSam Bobroff /* 6: DS207 */ 23586d5771aSSam Bobroff 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, /* 6 - 11 */ 2369fb4541fSSam Bobroff /* 16: Vector */ 23786d5771aSSam Bobroff 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, /* 12 - 17 */ 2389fb4541fSSam Bobroff /* 18: Vec. Scalar, 20: Vec. XOR, 22: HTM */ 2399bf502feSDavid Gibson 0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 18 - 23 */ 2409fb4541fSSam Bobroff /* 24: Ext. 
Dec, 26: 64 bit ftrs, 28: PM ftrs */ 2419fb4541fSSam Bobroff 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 24 - 29 */ 2429fb4541fSSam Bobroff /* 30: MMR, 32: LE atomic, 34: EBB + ext EBB */ 2439fb4541fSSam Bobroff 0x80, 0x00, 0x80, 0x00, 0xC0, 0x00, /* 30 - 35 */ 2449fb4541fSSam Bobroff /* 36: SPR SO, 38: Copy/Paste, 40: Radix MMU */ 2459fb4541fSSam Bobroff 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 36 - 41 */ 2469fb4541fSSam Bobroff /* 42: PM, 44: PC RA, 46: SC vec'd */ 2479fb4541fSSam Bobroff 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 42 - 47 */ 2489fb4541fSSam Bobroff /* 48: SIMD, 50: QP BFP, 52: String */ 2499fb4541fSSam Bobroff 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 48 - 53 */ 2509fb4541fSSam Bobroff /* 54: DecFP, 56: DecI, 58: SHA */ 2519fb4541fSSam Bobroff 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 54 - 59 */ 2529fb4541fSSam Bobroff /* 60: NM atomic, 62: RNG */ 2539fb4541fSSam Bobroff 0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 60 - 65 */ 2549fb4541fSSam Bobroff }; 2557abd43baSSuraj Jitindar Singh uint8_t *pa_features = NULL; 25686d5771aSSam Bobroff size_t pa_size; 25786d5771aSSam Bobroff 2587abd43baSSuraj Jitindar Singh if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_2_06, 0, cpu->compat_pvr)) { 25986d5771aSSam Bobroff pa_features = pa_features_206; 26086d5771aSSam Bobroff pa_size = sizeof(pa_features_206); 2617abd43baSSuraj Jitindar Singh } 2627abd43baSSuraj Jitindar Singh if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_2_07, 0, cpu->compat_pvr)) { 26386d5771aSSam Bobroff pa_features = pa_features_207; 26486d5771aSSam Bobroff pa_size = sizeof(pa_features_207); 2657abd43baSSuraj Jitindar Singh } 2667abd43baSSuraj Jitindar Singh if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_00, 0, cpu->compat_pvr)) { 26786d5771aSSam Bobroff pa_features = pa_features_300; 26886d5771aSSam Bobroff pa_size = sizeof(pa_features_300); 2697abd43baSSuraj Jitindar Singh } 2707abd43baSSuraj Jitindar Singh if (!pa_features) { 27186d5771aSSam Bobroff return; 27286d5771aSSam Bobroff } 27386d5771aSSam Bobroff 
27426cd35b8SDavid Gibson if (ppc_hash64_has(cpu, PPC_HASH64_CI_LARGEPAGE)) { 27586d5771aSSam Bobroff /* 27686d5771aSSam Bobroff * Note: we keep CI large pages off by default because a 64K capable 27786d5771aSSam Bobroff * guest provisioned with large pages might otherwise try to map a qemu 27886d5771aSSam Bobroff * framebuffer (or other kind of memory mapped PCI BAR) using 64K pages 27986d5771aSSam Bobroff * even if that qemu runs on a 4k host. 28086d5771aSSam Bobroff * We dd this bit back here if we are confident this is not an issue 28186d5771aSSam Bobroff */ 28286d5771aSSam Bobroff pa_features[3] |= 0x20; 28386d5771aSSam Bobroff } 2844e5fe368SSuraj Jitindar Singh if ((spapr_get_cap(spapr, SPAPR_CAP_HTM) != 0) && pa_size > 24) { 28586d5771aSSam Bobroff pa_features[24] |= 0x80; /* Transactional memory support */ 28686d5771aSSam Bobroff } 287daa36379SDavid Gibson if (spapr->cas_pre_isa3_guest && pa_size > 40) { 288e957f6a9SSam Bobroff /* Workaround for broken kernels that attempt (guest) radix 289e957f6a9SSam Bobroff * mode when they can't handle it, if they see the radix bit set 290e957f6a9SSam Bobroff * in pa-features. So hide it from them. 
*/ 291e957f6a9SSam Bobroff pa_features[40 + 2] &= ~0x80; /* Radix MMU */ 292e957f6a9SSam Bobroff } 29386d5771aSSam Bobroff 29486d5771aSSam Bobroff _FDT((fdt_setprop(fdt, offset, "ibm,pa-features", pa_features, pa_size))); 29586d5771aSSam Bobroff } 29686d5771aSSam Bobroff 297c86c1affSDaniel Henrique Barboza static hwaddr spapr_node0_size(MachineState *machine) 298b082d65aSAlexey Kardashevskiy { 299aa570207STao Xu if (machine->numa_state->num_nodes) { 300b082d65aSAlexey Kardashevskiy int i; 301aa570207STao Xu for (i = 0; i < machine->numa_state->num_nodes; ++i) { 3027e721e7bSTao Xu if (machine->numa_state->nodes[i].node_mem) { 3037e721e7bSTao Xu return MIN(pow2floor(machine->numa_state->nodes[i].node_mem), 304fb164994SDavid Gibson machine->ram_size); 305b082d65aSAlexey Kardashevskiy } 306b082d65aSAlexey Kardashevskiy } 307b082d65aSAlexey Kardashevskiy } 308fb164994SDavid Gibson return machine->ram_size; 309b082d65aSAlexey Kardashevskiy } 310b082d65aSAlexey Kardashevskiy 311a1d59c0fSAlexey Kardashevskiy static void add_str(GString *s, const gchar *s1) 312a1d59c0fSAlexey Kardashevskiy { 313a1d59c0fSAlexey Kardashevskiy g_string_append_len(s, s1, strlen(s1) + 1); 314a1d59c0fSAlexey Kardashevskiy } 31553018216SPaolo Bonzini 31603d196b7SBharata B Rao static int spapr_populate_memory_node(void *fdt, int nodeid, hwaddr start, 31726a8c353SAlexey Kardashevskiy hwaddr size) 31826a8c353SAlexey Kardashevskiy { 31926a8c353SAlexey Kardashevskiy uint32_t associativity[] = { 32026a8c353SAlexey Kardashevskiy cpu_to_be32(0x4), /* length */ 32126a8c353SAlexey Kardashevskiy cpu_to_be32(0x0), cpu_to_be32(0x0), 322c3b4f589SAlexey Kardashevskiy cpu_to_be32(0x0), cpu_to_be32(nodeid) 32326a8c353SAlexey Kardashevskiy }; 32426a8c353SAlexey Kardashevskiy char mem_name[32]; 32526a8c353SAlexey Kardashevskiy uint64_t mem_reg_property[2]; 32626a8c353SAlexey Kardashevskiy int off; 32726a8c353SAlexey Kardashevskiy 32826a8c353SAlexey Kardashevskiy mem_reg_property[0] = cpu_to_be64(start); 
32926a8c353SAlexey Kardashevskiy mem_reg_property[1] = cpu_to_be64(size); 33026a8c353SAlexey Kardashevskiy 3313a17e38fSAlexey Kardashevskiy sprintf(mem_name, "memory@%" HWADDR_PRIx, start); 33226a8c353SAlexey Kardashevskiy off = fdt_add_subnode(fdt, 0, mem_name); 33326a8c353SAlexey Kardashevskiy _FDT(off); 33426a8c353SAlexey Kardashevskiy _FDT((fdt_setprop_string(fdt, off, "device_type", "memory"))); 33526a8c353SAlexey Kardashevskiy _FDT((fdt_setprop(fdt, off, "reg", mem_reg_property, 33626a8c353SAlexey Kardashevskiy sizeof(mem_reg_property)))); 33726a8c353SAlexey Kardashevskiy _FDT((fdt_setprop(fdt, off, "ibm,associativity", associativity, 33826a8c353SAlexey Kardashevskiy sizeof(associativity)))); 33903d196b7SBharata B Rao return off; 34026a8c353SAlexey Kardashevskiy } 34126a8c353SAlexey Kardashevskiy 342ce2918cbSDavid Gibson static int spapr_populate_memory(SpaprMachineState *spapr, void *fdt) 34353018216SPaolo Bonzini { 344fb164994SDavid Gibson MachineState *machine = MACHINE(spapr); 3457db8a127SAlexey Kardashevskiy hwaddr mem_start, node_size; 346aa570207STao Xu int i, nb_nodes = machine->numa_state->num_nodes; 3477e721e7bSTao Xu NodeInfo *nodes = machine->numa_state->nodes; 34853018216SPaolo Bonzini 3497db8a127SAlexey Kardashevskiy for (i = 0, mem_start = 0; i < nb_nodes; ++i) { 3507db8a127SAlexey Kardashevskiy if (!nodes[i].node_mem) { 3517db8a127SAlexey Kardashevskiy continue; 35253018216SPaolo Bonzini } 353fb164994SDavid Gibson if (mem_start >= machine->ram_size) { 3545fe269b1SPaul Mackerras node_size = 0; 3555fe269b1SPaul Mackerras } else { 3567db8a127SAlexey Kardashevskiy node_size = nodes[i].node_mem; 357fb164994SDavid Gibson if (node_size > machine->ram_size - mem_start) { 358fb164994SDavid Gibson node_size = machine->ram_size - mem_start; 3595fe269b1SPaul Mackerras } 3605fe269b1SPaul Mackerras } 3617db8a127SAlexey Kardashevskiy if (!mem_start) { 362b472b1a7SDaniel Henrique Barboza /* spapr_machine_init() checks for rma_size <= node0_size 
363b472b1a7SDaniel Henrique Barboza * already */ 364e8f986fcSBharata B Rao spapr_populate_memory_node(fdt, i, 0, spapr->rma_size); 3657db8a127SAlexey Kardashevskiy mem_start += spapr->rma_size; 3667db8a127SAlexey Kardashevskiy node_size -= spapr->rma_size; 3677db8a127SAlexey Kardashevskiy } 3686010818cSAlexey Kardashevskiy for ( ; node_size; ) { 3696010818cSAlexey Kardashevskiy hwaddr sizetmp = pow2floor(node_size); 3706010818cSAlexey Kardashevskiy 3716010818cSAlexey Kardashevskiy /* mem_start != 0 here */ 3726010818cSAlexey Kardashevskiy if (ctzl(mem_start) < ctzl(sizetmp)) { 3736010818cSAlexey Kardashevskiy sizetmp = 1ULL << ctzl(mem_start); 3746010818cSAlexey Kardashevskiy } 3756010818cSAlexey Kardashevskiy 3766010818cSAlexey Kardashevskiy spapr_populate_memory_node(fdt, i, mem_start, sizetmp); 3776010818cSAlexey Kardashevskiy node_size -= sizetmp; 3786010818cSAlexey Kardashevskiy mem_start += sizetmp; 3796010818cSAlexey Kardashevskiy } 38053018216SPaolo Bonzini } 38153018216SPaolo Bonzini 38253018216SPaolo Bonzini return 0; 38353018216SPaolo Bonzini } 38453018216SPaolo Bonzini 3850da6f3feSBharata B Rao static void spapr_populate_cpu_dt(CPUState *cs, void *fdt, int offset, 386ce2918cbSDavid Gibson SpaprMachineState *spapr) 3870da6f3feSBharata B Rao { 388fe6b6346SLike Xu MachineState *ms = MACHINE(spapr); 3890da6f3feSBharata B Rao PowerPCCPU *cpu = POWERPC_CPU(cs); 3900da6f3feSBharata B Rao CPUPPCState *env = &cpu->env; 3910da6f3feSBharata B Rao PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs); 39214bb4486SGreg Kurz int index = spapr_get_vcpu_id(cpu); 3930da6f3feSBharata B Rao uint32_t segs[] = {cpu_to_be32(28), cpu_to_be32(40), 3940da6f3feSBharata B Rao 0xffffffff, 0xffffffff}; 395afd10a0fSBharata B Rao uint32_t tbfreq = kvm_enabled() ? kvmppc_get_tbfreq() 396afd10a0fSBharata B Rao : SPAPR_TIMEBASE_FREQ; 3970da6f3feSBharata B Rao uint32_t cpufreq = kvm_enabled() ? 
kvmppc_get_clockfreq() : 1000000000; 3980da6f3feSBharata B Rao uint32_t page_sizes_prop[64]; 3990da6f3feSBharata B Rao size_t page_sizes_prop_size; 400fe6b6346SLike Xu unsigned int smp_threads = ms->smp.threads; 401fe6b6346SLike Xu uint32_t vcpus_per_socket = smp_threads * ms->smp.cores; 4020da6f3feSBharata B Rao uint32_t pft_size_prop[] = {0, cpu_to_be32(spapr->htab_shift)}; 403abbc1247SDavid Gibson int compat_smt = MIN(smp_threads, ppc_compat_max_vthreads(cpu)); 404ce2918cbSDavid Gibson SpaprDrc *drc; 405af81cf32SBharata B Rao int drc_index; 406c64abd1fSSam Bobroff uint32_t radix_AP_encodings[PPC_PAGE_SIZES_MAX_SZ]; 407c64abd1fSSam Bobroff int i; 408af81cf32SBharata B Rao 409fbf55397SDavid Gibson drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU, index); 410af81cf32SBharata B Rao if (drc) { 4110b55aa91SDavid Gibson drc_index = spapr_drc_index(drc); 412af81cf32SBharata B Rao _FDT((fdt_setprop_cell(fdt, offset, "ibm,my-drc-index", drc_index))); 413af81cf32SBharata B Rao } 4140da6f3feSBharata B Rao 4150da6f3feSBharata B Rao _FDT((fdt_setprop_cell(fdt, offset, "reg", index))); 4160da6f3feSBharata B Rao _FDT((fdt_setprop_string(fdt, offset, "device_type", "cpu"))); 4170da6f3feSBharata B Rao 4180da6f3feSBharata B Rao _FDT((fdt_setprop_cell(fdt, offset, "cpu-version", env->spr[SPR_PVR]))); 4190da6f3feSBharata B Rao _FDT((fdt_setprop_cell(fdt, offset, "d-cache-block-size", 4200da6f3feSBharata B Rao env->dcache_line_size))); 4210da6f3feSBharata B Rao _FDT((fdt_setprop_cell(fdt, offset, "d-cache-line-size", 4220da6f3feSBharata B Rao env->dcache_line_size))); 4230da6f3feSBharata B Rao _FDT((fdt_setprop_cell(fdt, offset, "i-cache-block-size", 4240da6f3feSBharata B Rao env->icache_line_size))); 4250da6f3feSBharata B Rao _FDT((fdt_setprop_cell(fdt, offset, "i-cache-line-size", 4260da6f3feSBharata B Rao env->icache_line_size))); 4270da6f3feSBharata B Rao 4280da6f3feSBharata B Rao if (pcc->l1_dcache_size) { 4290da6f3feSBharata B Rao _FDT((fdt_setprop_cell(fdt, offset, "d-cache-size", 
4300da6f3feSBharata B Rao pcc->l1_dcache_size))); 4310da6f3feSBharata B Rao } else { 4323dc6f869SAlistair Francis warn_report("Unknown L1 dcache size for cpu"); 4330da6f3feSBharata B Rao } 4340da6f3feSBharata B Rao if (pcc->l1_icache_size) { 4350da6f3feSBharata B Rao _FDT((fdt_setprop_cell(fdt, offset, "i-cache-size", 4360da6f3feSBharata B Rao pcc->l1_icache_size))); 4370da6f3feSBharata B Rao } else { 4383dc6f869SAlistair Francis warn_report("Unknown L1 icache size for cpu"); 4390da6f3feSBharata B Rao } 4400da6f3feSBharata B Rao 4410da6f3feSBharata B Rao _FDT((fdt_setprop_cell(fdt, offset, "timebase-frequency", tbfreq))); 4420da6f3feSBharata B Rao _FDT((fdt_setprop_cell(fdt, offset, "clock-frequency", cpufreq))); 44367d7d66fSDavid Gibson _FDT((fdt_setprop_cell(fdt, offset, "slb-size", cpu->hash64_opts->slb_size))); 44467d7d66fSDavid Gibson _FDT((fdt_setprop_cell(fdt, offset, "ibm,slb-size", cpu->hash64_opts->slb_size))); 4450da6f3feSBharata B Rao _FDT((fdt_setprop_string(fdt, offset, "status", "okay"))); 4460da6f3feSBharata B Rao _FDT((fdt_setprop(fdt, offset, "64-bit", NULL, 0))); 4470da6f3feSBharata B Rao 4480da6f3feSBharata B Rao if (env->spr_cb[SPR_PURR].oea_read) { 44983f192d3SSuraj Jitindar Singh _FDT((fdt_setprop_cell(fdt, offset, "ibm,purr", 1))); 45083f192d3SSuraj Jitindar Singh } 45183f192d3SSuraj Jitindar Singh if (env->spr_cb[SPR_SPURR].oea_read) { 45283f192d3SSuraj Jitindar Singh _FDT((fdt_setprop_cell(fdt, offset, "ibm,spurr", 1))); 4530da6f3feSBharata B Rao } 4540da6f3feSBharata B Rao 45558969eeeSDavid Gibson if (ppc_hash64_has(cpu, PPC_HASH64_1TSEG)) { 4560da6f3feSBharata B Rao _FDT((fdt_setprop(fdt, offset, "ibm,processor-segment-sizes", 4570da6f3feSBharata B Rao segs, sizeof(segs)))); 4580da6f3feSBharata B Rao } 4590da6f3feSBharata B Rao 46029386642SDavid Gibson /* Advertise VSX (vector extensions) if available 4610da6f3feSBharata B Rao * 1 == VMX / Altivec available 46229386642SDavid Gibson * 2 == VSX available 46329386642SDavid Gibson * 
46429386642SDavid Gibson * Only CPUs for which we create core types in spapr_cpu_core.c 46529386642SDavid Gibson * are possible, and all of those have VMX */ 4664e5fe368SSuraj Jitindar Singh if (spapr_get_cap(spapr, SPAPR_CAP_VSX) != 0) { 46729386642SDavid Gibson _FDT((fdt_setprop_cell(fdt, offset, "ibm,vmx", 2))); 46829386642SDavid Gibson } else { 46929386642SDavid Gibson _FDT((fdt_setprop_cell(fdt, offset, "ibm,vmx", 1))); 4700da6f3feSBharata B Rao } 4710da6f3feSBharata B Rao 4720da6f3feSBharata B Rao /* Advertise DFP (Decimal Floating Point) if available 4730da6f3feSBharata B Rao * 0 / no property == no DFP 4740da6f3feSBharata B Rao * 1 == DFP available */ 4754e5fe368SSuraj Jitindar Singh if (spapr_get_cap(spapr, SPAPR_CAP_DFP) != 0) { 4760da6f3feSBharata B Rao _FDT((fdt_setprop_cell(fdt, offset, "ibm,dfp", 1))); 4770da6f3feSBharata B Rao } 4780da6f3feSBharata B Rao 479644a2c99SDavid Gibson page_sizes_prop_size = ppc_create_page_sizes_prop(cpu, page_sizes_prop, 4800da6f3feSBharata B Rao sizeof(page_sizes_prop)); 4810da6f3feSBharata B Rao if (page_sizes_prop_size) { 4820da6f3feSBharata B Rao _FDT((fdt_setprop(fdt, offset, "ibm,segment-page-sizes", 4830da6f3feSBharata B Rao page_sizes_prop, page_sizes_prop_size))); 4840da6f3feSBharata B Rao } 4850da6f3feSBharata B Rao 486daa36379SDavid Gibson spapr_populate_pa_features(spapr, cpu, fdt, offset); 48790da0d5aSBenjamin Herrenschmidt 4880da6f3feSBharata B Rao _FDT((fdt_setprop_cell(fdt, offset, "ibm,chip-id", 48922419c2aSDavid Gibson cs->cpu_index / vcpus_per_socket))); 4900da6f3feSBharata B Rao 4910da6f3feSBharata B Rao _FDT((fdt_setprop(fdt, offset, "ibm,pft-size", 4920da6f3feSBharata B Rao pft_size_prop, sizeof(pft_size_prop)))); 4930da6f3feSBharata B Rao 494aa570207STao Xu if (ms->numa_state->num_nodes > 1) { 49599861ecbSIgor Mammedov _FDT(spapr_fixup_cpu_numa_dt(fdt, offset, cpu)); 49699861ecbSIgor Mammedov } 4970da6f3feSBharata B Rao 49812dbeb16SDavid Gibson _FDT(spapr_fixup_cpu_smt_dt(fdt, offset, cpu, 
compat_smt)); 499c64abd1fSSam Bobroff 500c64abd1fSSam Bobroff if (pcc->radix_page_info) { 501c64abd1fSSam Bobroff for (i = 0; i < pcc->radix_page_info->count; i++) { 502c64abd1fSSam Bobroff radix_AP_encodings[i] = 503c64abd1fSSam Bobroff cpu_to_be32(pcc->radix_page_info->entries[i]); 504c64abd1fSSam Bobroff } 505c64abd1fSSam Bobroff _FDT((fdt_setprop(fdt, offset, "ibm,processor-radix-AP-encodings", 506c64abd1fSSam Bobroff radix_AP_encodings, 507c64abd1fSSam Bobroff pcc->radix_page_info->count * 508c64abd1fSSam Bobroff sizeof(radix_AP_encodings[0])))); 509c64abd1fSSam Bobroff } 510a8dafa52SSuraj Jitindar Singh 511a8dafa52SSuraj Jitindar Singh /* 512a8dafa52SSuraj Jitindar Singh * We set this property to let the guest know that it can use the large 513a8dafa52SSuraj Jitindar Singh * decrementer and its width in bits. 514a8dafa52SSuraj Jitindar Singh */ 515a8dafa52SSuraj Jitindar Singh if (spapr_get_cap(spapr, SPAPR_CAP_LARGE_DECREMENTER) != SPAPR_CAP_OFF) 516a8dafa52SSuraj Jitindar Singh _FDT((fdt_setprop_u32(fdt, offset, "ibm,dec-bits", 517a8dafa52SSuraj Jitindar Singh pcc->lrg_decr_bits))); 5180da6f3feSBharata B Rao } 5190da6f3feSBharata B Rao 520ce2918cbSDavid Gibson static void spapr_populate_cpus_dt_node(void *fdt, SpaprMachineState *spapr) 5210da6f3feSBharata B Rao { 52204d595b3SEmilio G. Cota CPUState **rev; 5230da6f3feSBharata B Rao CPUState *cs; 52404d595b3SEmilio G. Cota int n_cpus; 5250da6f3feSBharata B Rao int cpus_offset; 5260da6f3feSBharata B Rao char *nodename; 52704d595b3SEmilio G. 
Cota int i; 5280da6f3feSBharata B Rao 5290da6f3feSBharata B Rao cpus_offset = fdt_add_subnode(fdt, 0, "cpus"); 5300da6f3feSBharata B Rao _FDT(cpus_offset); 5310da6f3feSBharata B Rao _FDT((fdt_setprop_cell(fdt, cpus_offset, "#address-cells", 0x1))); 5320da6f3feSBharata B Rao _FDT((fdt_setprop_cell(fdt, cpus_offset, "#size-cells", 0x0))); 5330da6f3feSBharata B Rao 5340da6f3feSBharata B Rao /* 5350da6f3feSBharata B Rao * We walk the CPUs in reverse order to ensure that CPU DT nodes 5360da6f3feSBharata B Rao * created by fdt_add_subnode() end up in the right order in FDT 5370da6f3feSBharata B Rao * for the guest kernel the enumerate the CPUs correctly. 53804d595b3SEmilio G. Cota * 53904d595b3SEmilio G. Cota * The CPU list cannot be traversed in reverse order, so we need 54004d595b3SEmilio G. Cota * to do extra work. 5410da6f3feSBharata B Rao */ 54204d595b3SEmilio G. Cota n_cpus = 0; 54304d595b3SEmilio G. Cota rev = NULL; 54404d595b3SEmilio G. Cota CPU_FOREACH(cs) { 54504d595b3SEmilio G. Cota rev = g_renew(CPUState *, rev, n_cpus + 1); 54604d595b3SEmilio G. Cota rev[n_cpus++] = cs; 54704d595b3SEmilio G. Cota } 54804d595b3SEmilio G. Cota 54904d595b3SEmilio G. Cota for (i = n_cpus - 1; i >= 0; i--) { 55004d595b3SEmilio G. 
Cota CPUState *cs = rev[i]; 5510da6f3feSBharata B Rao PowerPCCPU *cpu = POWERPC_CPU(cs); 55214bb4486SGreg Kurz int index = spapr_get_vcpu_id(cpu); 5530da6f3feSBharata B Rao DeviceClass *dc = DEVICE_GET_CLASS(cs); 5540da6f3feSBharata B Rao int offset; 5550da6f3feSBharata B Rao 5565d0fb150SGreg Kurz if (!spapr_is_thread0_in_vcore(spapr, cpu)) { 5570da6f3feSBharata B Rao continue; 5580da6f3feSBharata B Rao } 5590da6f3feSBharata B Rao 5600da6f3feSBharata B Rao nodename = g_strdup_printf("%s@%x", dc->fw_name, index); 5610da6f3feSBharata B Rao offset = fdt_add_subnode(fdt, cpus_offset, nodename); 5620da6f3feSBharata B Rao g_free(nodename); 5630da6f3feSBharata B Rao _FDT(offset); 5640da6f3feSBharata B Rao spapr_populate_cpu_dt(cs, fdt, offset, spapr); 5650da6f3feSBharata B Rao } 5660da6f3feSBharata B Rao 567eceba347SEmilio G. Cota g_free(rev); 5680da6f3feSBharata B Rao } 5690da6f3feSBharata B Rao 5700e947a89SThomas Huth static int spapr_rng_populate_dt(void *fdt) 5710e947a89SThomas Huth { 5720e947a89SThomas Huth int node; 5730e947a89SThomas Huth int ret; 5740e947a89SThomas Huth 5750e947a89SThomas Huth node = qemu_fdt_add_subnode(fdt, "/ibm,platform-facilities"); 5760e947a89SThomas Huth if (node <= 0) { 5770e947a89SThomas Huth return -1; 5780e947a89SThomas Huth } 5790e947a89SThomas Huth ret = fdt_setprop_string(fdt, node, "device_type", 5800e947a89SThomas Huth "ibm,platform-facilities"); 5810e947a89SThomas Huth ret |= fdt_setprop_cell(fdt, node, "#address-cells", 0x1); 5820e947a89SThomas Huth ret |= fdt_setprop_cell(fdt, node, "#size-cells", 0x0); 5830e947a89SThomas Huth 5840e947a89SThomas Huth node = fdt_add_subnode(fdt, node, "ibm,random-v1"); 5850e947a89SThomas Huth if (node <= 0) { 5860e947a89SThomas Huth return -1; 5870e947a89SThomas Huth } 5880e947a89SThomas Huth ret |= fdt_setprop_string(fdt, node, "compatible", "ibm,random"); 5890e947a89SThomas Huth 5900e947a89SThomas Huth return ret ? 
-1 : 0; 5910e947a89SThomas Huth } 5920e947a89SThomas Huth 593f47bd1c8SIgor Mammedov static uint32_t spapr_pc_dimm_node(MemoryDeviceInfoList *list, ram_addr_t addr) 594f47bd1c8SIgor Mammedov { 595f47bd1c8SIgor Mammedov MemoryDeviceInfoList *info; 596f47bd1c8SIgor Mammedov 597f47bd1c8SIgor Mammedov for (info = list; info; info = info->next) { 598f47bd1c8SIgor Mammedov MemoryDeviceInfo *value = info->value; 599f47bd1c8SIgor Mammedov 600f47bd1c8SIgor Mammedov if (value && value->type == MEMORY_DEVICE_INFO_KIND_DIMM) { 601f47bd1c8SIgor Mammedov PCDIMMDeviceInfo *pcdimm_info = value->u.dimm.data; 602f47bd1c8SIgor Mammedov 603ccc2cef8SDavid Gibson if (addr >= pcdimm_info->addr && 604f47bd1c8SIgor Mammedov addr < (pcdimm_info->addr + pcdimm_info->size)) { 605f47bd1c8SIgor Mammedov return pcdimm_info->node; 606f47bd1c8SIgor Mammedov } 607f47bd1c8SIgor Mammedov } 608f47bd1c8SIgor Mammedov } 609f47bd1c8SIgor Mammedov 610f47bd1c8SIgor Mammedov return -1; 611f47bd1c8SIgor Mammedov } 612f47bd1c8SIgor Mammedov 613a324d6f1SBharata B Rao struct sPAPRDrconfCellV2 { 614a324d6f1SBharata B Rao uint32_t seq_lmbs; 615a324d6f1SBharata B Rao uint64_t base_addr; 616a324d6f1SBharata B Rao uint32_t drc_index; 617a324d6f1SBharata B Rao uint32_t aa_index; 618a324d6f1SBharata B Rao uint32_t flags; 619a324d6f1SBharata B Rao } QEMU_PACKED; 620a324d6f1SBharata B Rao 621a324d6f1SBharata B Rao typedef struct DrconfCellQueue { 622a324d6f1SBharata B Rao struct sPAPRDrconfCellV2 cell; 623a324d6f1SBharata B Rao QSIMPLEQ_ENTRY(DrconfCellQueue) entry; 624a324d6f1SBharata B Rao } DrconfCellQueue; 625a324d6f1SBharata B Rao 626a324d6f1SBharata B Rao static DrconfCellQueue * 627a324d6f1SBharata B Rao spapr_get_drconf_cell(uint32_t seq_lmbs, uint64_t base_addr, 628a324d6f1SBharata B Rao uint32_t drc_index, uint32_t aa_index, 629a324d6f1SBharata B Rao uint32_t flags) 63003d196b7SBharata B Rao { 631a324d6f1SBharata B Rao DrconfCellQueue *elem; 632a324d6f1SBharata B Rao 633a324d6f1SBharata B Rao elem = 
g_malloc0(sizeof(*elem)); 634a324d6f1SBharata B Rao elem->cell.seq_lmbs = cpu_to_be32(seq_lmbs); 635a324d6f1SBharata B Rao elem->cell.base_addr = cpu_to_be64(base_addr); 636a324d6f1SBharata B Rao elem->cell.drc_index = cpu_to_be32(drc_index); 637a324d6f1SBharata B Rao elem->cell.aa_index = cpu_to_be32(aa_index); 638a324d6f1SBharata B Rao elem->cell.flags = cpu_to_be32(flags); 639a324d6f1SBharata B Rao 640a324d6f1SBharata B Rao return elem; 641a324d6f1SBharata B Rao } 642a324d6f1SBharata B Rao 643a324d6f1SBharata B Rao /* ibm,dynamic-memory-v2 */ 644ce2918cbSDavid Gibson static int spapr_populate_drmem_v2(SpaprMachineState *spapr, void *fdt, 645a324d6f1SBharata B Rao int offset, MemoryDeviceInfoList *dimms) 646a324d6f1SBharata B Rao { 647b0c14ec4SDavid Hildenbrand MachineState *machine = MACHINE(spapr); 648cc941111SFabiano Rosas uint8_t *int_buf, *cur_index; 649a324d6f1SBharata B Rao int ret; 65003d196b7SBharata B Rao uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE; 651a324d6f1SBharata B Rao uint64_t addr, cur_addr, size; 652b0c14ec4SDavid Hildenbrand uint32_t nr_boot_lmbs = (machine->device_memory->base / lmb_size); 653b0c14ec4SDavid Hildenbrand uint64_t mem_end = machine->device_memory->base + 654b0c14ec4SDavid Hildenbrand memory_region_size(&machine->device_memory->mr); 655cc941111SFabiano Rosas uint32_t node, buf_len, nr_entries = 0; 656ce2918cbSDavid Gibson SpaprDrc *drc; 657a324d6f1SBharata B Rao DrconfCellQueue *elem, *next; 658a324d6f1SBharata B Rao MemoryDeviceInfoList *info; 659a324d6f1SBharata B Rao QSIMPLEQ_HEAD(, DrconfCellQueue) drconf_queue 660a324d6f1SBharata B Rao = QSIMPLEQ_HEAD_INITIALIZER(drconf_queue); 661a324d6f1SBharata B Rao 662a324d6f1SBharata B Rao /* Entry to cover RAM and the gap area */ 663a324d6f1SBharata B Rao elem = spapr_get_drconf_cell(nr_boot_lmbs, 0, 0, -1, 664a324d6f1SBharata B Rao SPAPR_LMB_FLAGS_RESERVED | 665a324d6f1SBharata B Rao SPAPR_LMB_FLAGS_DRC_INVALID); 666a324d6f1SBharata B Rao QSIMPLEQ_INSERT_TAIL(&drconf_queue, elem, 
entry); 667a324d6f1SBharata B Rao nr_entries++; 668a324d6f1SBharata B Rao 669b0c14ec4SDavid Hildenbrand cur_addr = machine->device_memory->base; 670a324d6f1SBharata B Rao for (info = dimms; info; info = info->next) { 671a324d6f1SBharata B Rao PCDIMMDeviceInfo *di = info->value->u.dimm.data; 672a324d6f1SBharata B Rao 673a324d6f1SBharata B Rao addr = di->addr; 674a324d6f1SBharata B Rao size = di->size; 675a324d6f1SBharata B Rao node = di->node; 676a324d6f1SBharata B Rao 677a324d6f1SBharata B Rao /* Entry for hot-pluggable area */ 678a324d6f1SBharata B Rao if (cur_addr < addr) { 679a324d6f1SBharata B Rao drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, cur_addr / lmb_size); 680a324d6f1SBharata B Rao g_assert(drc); 681a324d6f1SBharata B Rao elem = spapr_get_drconf_cell((addr - cur_addr) / lmb_size, 682a324d6f1SBharata B Rao cur_addr, spapr_drc_index(drc), -1, 0); 683a324d6f1SBharata B Rao QSIMPLEQ_INSERT_TAIL(&drconf_queue, elem, entry); 684a324d6f1SBharata B Rao nr_entries++; 685a324d6f1SBharata B Rao } 686a324d6f1SBharata B Rao 687a324d6f1SBharata B Rao /* Entry for DIMM */ 688a324d6f1SBharata B Rao drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, addr / lmb_size); 689a324d6f1SBharata B Rao g_assert(drc); 690a324d6f1SBharata B Rao elem = spapr_get_drconf_cell(size / lmb_size, addr, 691a324d6f1SBharata B Rao spapr_drc_index(drc), node, 692a324d6f1SBharata B Rao SPAPR_LMB_FLAGS_ASSIGNED); 693a324d6f1SBharata B Rao QSIMPLEQ_INSERT_TAIL(&drconf_queue, elem, entry); 694a324d6f1SBharata B Rao nr_entries++; 695a324d6f1SBharata B Rao cur_addr = addr + size; 696a324d6f1SBharata B Rao } 697a324d6f1SBharata B Rao 698a324d6f1SBharata B Rao /* Entry for remaining hotpluggable area */ 699a324d6f1SBharata B Rao if (cur_addr < mem_end) { 700a324d6f1SBharata B Rao drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, cur_addr / lmb_size); 701a324d6f1SBharata B Rao g_assert(drc); 702a324d6f1SBharata B Rao elem = spapr_get_drconf_cell((mem_end - cur_addr) / lmb_size, 703a324d6f1SBharata B Rao cur_addr, 
spapr_drc_index(drc), -1, 0); 704a324d6f1SBharata B Rao QSIMPLEQ_INSERT_TAIL(&drconf_queue, elem, entry); 705a324d6f1SBharata B Rao nr_entries++; 706a324d6f1SBharata B Rao } 707a324d6f1SBharata B Rao 708a324d6f1SBharata B Rao buf_len = nr_entries * sizeof(struct sPAPRDrconfCellV2) + sizeof(uint32_t); 709a324d6f1SBharata B Rao int_buf = cur_index = g_malloc0(buf_len); 710a324d6f1SBharata B Rao *(uint32_t *)int_buf = cpu_to_be32(nr_entries); 711a324d6f1SBharata B Rao cur_index += sizeof(nr_entries); 712a324d6f1SBharata B Rao 713a324d6f1SBharata B Rao QSIMPLEQ_FOREACH_SAFE(elem, &drconf_queue, entry, next) { 714a324d6f1SBharata B Rao memcpy(cur_index, &elem->cell, sizeof(elem->cell)); 715a324d6f1SBharata B Rao cur_index += sizeof(elem->cell); 716a324d6f1SBharata B Rao QSIMPLEQ_REMOVE(&drconf_queue, elem, DrconfCellQueue, entry); 717a324d6f1SBharata B Rao g_free(elem); 718a324d6f1SBharata B Rao } 719a324d6f1SBharata B Rao 720a324d6f1SBharata B Rao ret = fdt_setprop(fdt, offset, "ibm,dynamic-memory-v2", int_buf, buf_len); 721a324d6f1SBharata B Rao g_free(int_buf); 722a324d6f1SBharata B Rao if (ret < 0) { 723a324d6f1SBharata B Rao return -1; 724a324d6f1SBharata B Rao } 725a324d6f1SBharata B Rao return 0; 726a324d6f1SBharata B Rao } 727a324d6f1SBharata B Rao 728a324d6f1SBharata B Rao /* ibm,dynamic-memory */ 729ce2918cbSDavid Gibson static int spapr_populate_drmem_v1(SpaprMachineState *spapr, void *fdt, 730a324d6f1SBharata B Rao int offset, MemoryDeviceInfoList *dimms) 731a324d6f1SBharata B Rao { 732b0c14ec4SDavid Hildenbrand MachineState *machine = MACHINE(spapr); 733a324d6f1SBharata B Rao int i, ret; 734a324d6f1SBharata B Rao uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE; 7350c9269a5SDavid Hildenbrand uint32_t device_lmb_start = machine->device_memory->base / lmb_size; 736b0c14ec4SDavid Hildenbrand uint32_t nr_lmbs = (machine->device_memory->base + 737b0c14ec4SDavid Hildenbrand memory_region_size(&machine->device_memory->mr)) / 738d0e5a8f2SBharata B Rao lmb_size; 
73903d196b7SBharata B Rao uint32_t *int_buf, *cur_index, buf_len; 74016c25aefSBharata B Rao 74116c25aefSBharata B Rao /* 742ef001f06SThomas Huth * Allocate enough buffer size to fit in ibm,dynamic-memory 743ef001f06SThomas Huth */ 744a324d6f1SBharata B Rao buf_len = (nr_lmbs * SPAPR_DR_LMB_LIST_ENTRY_SIZE + 1) * sizeof(uint32_t); 74503d196b7SBharata B Rao cur_index = int_buf = g_malloc0(buf_len); 74603d196b7SBharata B Rao int_buf[0] = cpu_to_be32(nr_lmbs); 74703d196b7SBharata B Rao cur_index++; 74803d196b7SBharata B Rao for (i = 0; i < nr_lmbs; i++) { 749d0e5a8f2SBharata B Rao uint64_t addr = i * lmb_size; 75003d196b7SBharata B Rao uint32_t *dynamic_memory = cur_index; 75103d196b7SBharata B Rao 7520c9269a5SDavid Hildenbrand if (i >= device_lmb_start) { 753ce2918cbSDavid Gibson SpaprDrc *drc; 754d0e5a8f2SBharata B Rao 755fbf55397SDavid Gibson drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, i); 75603d196b7SBharata B Rao g_assert(drc); 75703d196b7SBharata B Rao 75803d196b7SBharata B Rao dynamic_memory[0] = cpu_to_be32(addr >> 32); 75903d196b7SBharata B Rao dynamic_memory[1] = cpu_to_be32(addr & 0xffffffff); 7600b55aa91SDavid Gibson dynamic_memory[2] = cpu_to_be32(spapr_drc_index(drc)); 76103d196b7SBharata B Rao dynamic_memory[3] = cpu_to_be32(0); /* reserved */ 762f47bd1c8SIgor Mammedov dynamic_memory[4] = cpu_to_be32(spapr_pc_dimm_node(dimms, addr)); 763d0e5a8f2SBharata B Rao if (memory_region_present(get_system_memory(), addr)) { 76403d196b7SBharata B Rao dynamic_memory[5] = cpu_to_be32(SPAPR_LMB_FLAGS_ASSIGNED); 76503d196b7SBharata B Rao } else { 76603d196b7SBharata B Rao dynamic_memory[5] = cpu_to_be32(0); 76703d196b7SBharata B Rao } 768d0e5a8f2SBharata B Rao } else { 769d0e5a8f2SBharata B Rao /* 770d0e5a8f2SBharata B Rao * LMB information for RMA, boot time RAM and gap b/n RAM and 7710c9269a5SDavid Hildenbrand * device memory region -- all these are marked as reserved 772d0e5a8f2SBharata B Rao * and as having no valid DRC. 
773d0e5a8f2SBharata B Rao */ 774d0e5a8f2SBharata B Rao dynamic_memory[0] = cpu_to_be32(addr >> 32); 775d0e5a8f2SBharata B Rao dynamic_memory[1] = cpu_to_be32(addr & 0xffffffff); 776d0e5a8f2SBharata B Rao dynamic_memory[2] = cpu_to_be32(0); 777d0e5a8f2SBharata B Rao dynamic_memory[3] = cpu_to_be32(0); /* reserved */ 778d0e5a8f2SBharata B Rao dynamic_memory[4] = cpu_to_be32(-1); 779d0e5a8f2SBharata B Rao dynamic_memory[5] = cpu_to_be32(SPAPR_LMB_FLAGS_RESERVED | 780d0e5a8f2SBharata B Rao SPAPR_LMB_FLAGS_DRC_INVALID); 781d0e5a8f2SBharata B Rao } 78203d196b7SBharata B Rao 78303d196b7SBharata B Rao cur_index += SPAPR_DR_LMB_LIST_ENTRY_SIZE; 78403d196b7SBharata B Rao } 78503d196b7SBharata B Rao ret = fdt_setprop(fdt, offset, "ibm,dynamic-memory", int_buf, buf_len); 786a324d6f1SBharata B Rao g_free(int_buf); 78703d196b7SBharata B Rao if (ret < 0) { 788a324d6f1SBharata B Rao return -1; 789a324d6f1SBharata B Rao } 790a324d6f1SBharata B Rao return 0; 791a324d6f1SBharata B Rao } 792a324d6f1SBharata B Rao 793a324d6f1SBharata B Rao /* 794a324d6f1SBharata B Rao * Adds ibm,dynamic-reconfiguration-memory node. 795a324d6f1SBharata B Rao * Refer to docs/specs/ppc-spapr-hotplug.txt for the documentation 796a324d6f1SBharata B Rao * of this device tree node. 797a324d6f1SBharata B Rao */ 798ce2918cbSDavid Gibson static int spapr_populate_drconf_memory(SpaprMachineState *spapr, void *fdt) 799a324d6f1SBharata B Rao { 800a324d6f1SBharata B Rao MachineState *machine = MACHINE(spapr); 801aa570207STao Xu int nb_numa_nodes = machine->numa_state->num_nodes; 802a324d6f1SBharata B Rao int ret, i, offset; 803a324d6f1SBharata B Rao uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE; 804a324d6f1SBharata B Rao uint32_t prop_lmb_size[] = {0, cpu_to_be32(lmb_size)}; 805a324d6f1SBharata B Rao uint32_t *int_buf, *cur_index, buf_len; 806a324d6f1SBharata B Rao int nr_nodes = nb_numa_nodes ? 
nb_numa_nodes : 1; 807a324d6f1SBharata B Rao MemoryDeviceInfoList *dimms = NULL; 808a324d6f1SBharata B Rao 809a324d6f1SBharata B Rao /* 8100c9269a5SDavid Hildenbrand * Don't create the node if there is no device memory 811a324d6f1SBharata B Rao */ 812a324d6f1SBharata B Rao if (machine->ram_size == machine->maxram_size) { 813a324d6f1SBharata B Rao return 0; 814a324d6f1SBharata B Rao } 815a324d6f1SBharata B Rao 816a324d6f1SBharata B Rao offset = fdt_add_subnode(fdt, 0, "ibm,dynamic-reconfiguration-memory"); 817a324d6f1SBharata B Rao 818a324d6f1SBharata B Rao ret = fdt_setprop(fdt, offset, "ibm,lmb-size", prop_lmb_size, 819a324d6f1SBharata B Rao sizeof(prop_lmb_size)); 820a324d6f1SBharata B Rao if (ret < 0) { 821a324d6f1SBharata B Rao return ret; 822a324d6f1SBharata B Rao } 823a324d6f1SBharata B Rao 824a324d6f1SBharata B Rao ret = fdt_setprop_cell(fdt, offset, "ibm,memory-flags-mask", 0xff); 825a324d6f1SBharata B Rao if (ret < 0) { 826a324d6f1SBharata B Rao return ret; 827a324d6f1SBharata B Rao } 828a324d6f1SBharata B Rao 829a324d6f1SBharata B Rao ret = fdt_setprop_cell(fdt, offset, "ibm,memory-preservation-time", 0x0); 830a324d6f1SBharata B Rao if (ret < 0) { 831a324d6f1SBharata B Rao return ret; 832a324d6f1SBharata B Rao } 833a324d6f1SBharata B Rao 834a324d6f1SBharata B Rao /* ibm,dynamic-memory or ibm,dynamic-memory-v2 */ 8352cc0e2e8SDavid Hildenbrand dimms = qmp_memory_device_list(); 836a324d6f1SBharata B Rao if (spapr_ovec_test(spapr->ov5_cas, OV5_DRMEM_V2)) { 837a324d6f1SBharata B Rao ret = spapr_populate_drmem_v2(spapr, fdt, offset, dimms); 838a324d6f1SBharata B Rao } else { 839a324d6f1SBharata B Rao ret = spapr_populate_drmem_v1(spapr, fdt, offset, dimms); 840a324d6f1SBharata B Rao } 841a324d6f1SBharata B Rao qapi_free_MemoryDeviceInfoList(dimms); 842a324d6f1SBharata B Rao 843a324d6f1SBharata B Rao if (ret < 0) { 844a324d6f1SBharata B Rao return ret; 84503d196b7SBharata B Rao } 84603d196b7SBharata B Rao 84703d196b7SBharata B Rao /* 
ibm,associativity-lookup-arrays */ 848a324d6f1SBharata B Rao buf_len = (nr_nodes * 4 + 2) * sizeof(uint32_t); 849a324d6f1SBharata B Rao cur_index = int_buf = g_malloc0(buf_len); 8506663864eSBharata B Rao int_buf[0] = cpu_to_be32(nr_nodes); 85103d196b7SBharata B Rao int_buf[1] = cpu_to_be32(4); /* Number of entries per associativity list */ 85203d196b7SBharata B Rao cur_index += 2; 8536663864eSBharata B Rao for (i = 0; i < nr_nodes; i++) { 85403d196b7SBharata B Rao uint32_t associativity[] = { 85503d196b7SBharata B Rao cpu_to_be32(0x0), 85603d196b7SBharata B Rao cpu_to_be32(0x0), 85703d196b7SBharata B Rao cpu_to_be32(0x0), 85803d196b7SBharata B Rao cpu_to_be32(i) 85903d196b7SBharata B Rao }; 86003d196b7SBharata B Rao memcpy(cur_index, associativity, sizeof(associativity)); 86103d196b7SBharata B Rao cur_index += 4; 86203d196b7SBharata B Rao } 86303d196b7SBharata B Rao ret = fdt_setprop(fdt, offset, "ibm,associativity-lookup-arrays", int_buf, 86403d196b7SBharata B Rao (cur_index - int_buf) * sizeof(uint32_t)); 86503d196b7SBharata B Rao g_free(int_buf); 866a324d6f1SBharata B Rao 86703d196b7SBharata B Rao return ret; 86803d196b7SBharata B Rao } 86903d196b7SBharata B Rao 870ce2918cbSDavid Gibson static int spapr_dt_cas_updates(SpaprMachineState *spapr, void *fdt, 871ce2918cbSDavid Gibson SpaprOptionVector *ov5_updates) 8726787d27bSMichael Roth { 873ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); 874417ece33SMichael Roth int ret = 0, offset; 8756787d27bSMichael Roth 8766787d27bSMichael Roth /* Generate ibm,dynamic-reconfiguration-memory node if required */ 8776787d27bSMichael Roth if (spapr_ovec_test(ov5_updates, OV5_DRCONF_MEMORY)) { 8786787d27bSMichael Roth g_assert(smc->dr_lmb_enabled); 8796787d27bSMichael Roth ret = spapr_populate_drconf_memory(spapr, fdt); 880417ece33SMichael Roth if (ret) { 8819b6c1da5SDaniel Henrique Barboza return ret; 882417ece33SMichael Roth } 8836787d27bSMichael Roth } 8846787d27bSMichael Roth 885417ece33SMichael 
Roth offset = fdt_path_offset(fdt, "/chosen"); 886417ece33SMichael Roth if (offset < 0) { 887417ece33SMichael Roth offset = fdt_add_subnode(fdt, 0, "chosen"); 888417ece33SMichael Roth if (offset < 0) { 889417ece33SMichael Roth return offset; 890417ece33SMichael Roth } 891417ece33SMichael Roth } 8929b6c1da5SDaniel Henrique Barboza return spapr_ovec_populate_dt(fdt, offset, spapr->ov5_cas, 893417ece33SMichael Roth "ibm,architecture-vec-5"); 8946787d27bSMichael Roth } 8956787d27bSMichael Roth 896ce2918cbSDavid Gibson static void spapr_dt_rtas(SpaprMachineState *spapr, void *fdt) 8973f5dabceSDavid Gibson { 898fe6b6346SLike Xu MachineState *ms = MACHINE(spapr); 8993f5dabceSDavid Gibson int rtas; 9003f5dabceSDavid Gibson GString *hypertas = g_string_sized_new(256); 9013f5dabceSDavid Gibson GString *qemu_hypertas = g_string_sized_new(256); 9023f5dabceSDavid Gibson uint32_t refpoints[] = { cpu_to_be32(0x4), cpu_to_be32(0x4) }; 9030c9269a5SDavid Hildenbrand uint64_t max_device_addr = MACHINE(spapr)->device_memory->base + 904b0c14ec4SDavid Hildenbrand memory_region_size(&MACHINE(spapr)->device_memory->mr); 9053f5dabceSDavid Gibson uint32_t lrdr_capacity[] = { 9060c9269a5SDavid Hildenbrand cpu_to_be32(max_device_addr >> 32), 9070c9269a5SDavid Hildenbrand cpu_to_be32(max_device_addr & 0xffffffff), 9083f5dabceSDavid Gibson 0, cpu_to_be32(SPAPR_MEMORY_BLOCK_SIZE), 909fe6b6346SLike Xu cpu_to_be32(ms->smp.max_cpus / ms->smp.threads), 9103f5dabceSDavid Gibson }; 911ec132efaSAlexey Kardashevskiy uint32_t maxdomain = cpu_to_be32(spapr->gpu_numa_id > 1 ? 
1 : 0); 912da9f80fbSSerhii Popovych uint32_t maxdomains[] = { 913da9f80fbSSerhii Popovych cpu_to_be32(4), 914ec132efaSAlexey Kardashevskiy maxdomain, 915ec132efaSAlexey Kardashevskiy maxdomain, 916ec132efaSAlexey Kardashevskiy maxdomain, 917ec132efaSAlexey Kardashevskiy cpu_to_be32(spapr->gpu_numa_id), 918da9f80fbSSerhii Popovych }; 9193f5dabceSDavid Gibson 9203f5dabceSDavid Gibson _FDT(rtas = fdt_add_subnode(fdt, 0, "rtas")); 9213f5dabceSDavid Gibson 9223f5dabceSDavid Gibson /* hypertas */ 9233f5dabceSDavid Gibson add_str(hypertas, "hcall-pft"); 9243f5dabceSDavid Gibson add_str(hypertas, "hcall-term"); 9253f5dabceSDavid Gibson add_str(hypertas, "hcall-dabr"); 9263f5dabceSDavid Gibson add_str(hypertas, "hcall-interrupt"); 9273f5dabceSDavid Gibson add_str(hypertas, "hcall-tce"); 9283f5dabceSDavid Gibson add_str(hypertas, "hcall-vio"); 9293f5dabceSDavid Gibson add_str(hypertas, "hcall-splpar"); 93010741314SNicholas Piggin add_str(hypertas, "hcall-join"); 9313f5dabceSDavid Gibson add_str(hypertas, "hcall-bulk"); 9323f5dabceSDavid Gibson add_str(hypertas, "hcall-set-mode"); 9333f5dabceSDavid Gibson add_str(hypertas, "hcall-sprg0"); 9343f5dabceSDavid Gibson add_str(hypertas, "hcall-copy"); 9353f5dabceSDavid Gibson add_str(hypertas, "hcall-debug"); 936c24ba3d0SLaurent Vivier add_str(hypertas, "hcall-vphn"); 9373f5dabceSDavid Gibson add_str(qemu_hypertas, "hcall-memop1"); 9383f5dabceSDavid Gibson 9393f5dabceSDavid Gibson if (!kvm_enabled() || kvmppc_spapr_use_multitce()) { 9403f5dabceSDavid Gibson add_str(hypertas, "hcall-multi-tce"); 9413f5dabceSDavid Gibson } 94230f4b05bSDavid Gibson 94330f4b05bSDavid Gibson if (spapr->resize_hpt != SPAPR_RESIZE_HPT_DISABLED) { 94430f4b05bSDavid Gibson add_str(hypertas, "hcall-hpt-resize"); 94530f4b05bSDavid Gibson } 94630f4b05bSDavid Gibson 9473f5dabceSDavid Gibson _FDT(fdt_setprop(fdt, rtas, "ibm,hypertas-functions", 9483f5dabceSDavid Gibson hypertas->str, hypertas->len)); 9493f5dabceSDavid Gibson g_string_free(hypertas, TRUE); 
9503f5dabceSDavid Gibson _FDT(fdt_setprop(fdt, rtas, "qemu,hypertas-functions", 9513f5dabceSDavid Gibson qemu_hypertas->str, qemu_hypertas->len)); 9523f5dabceSDavid Gibson g_string_free(qemu_hypertas, TRUE); 9533f5dabceSDavid Gibson 9543f5dabceSDavid Gibson _FDT(fdt_setprop(fdt, rtas, "ibm,associativity-reference-points", 9553f5dabceSDavid Gibson refpoints, sizeof(refpoints))); 9563f5dabceSDavid Gibson 957da9f80fbSSerhii Popovych _FDT(fdt_setprop(fdt, rtas, "ibm,max-associativity-domains", 958da9f80fbSSerhii Popovych maxdomains, sizeof(maxdomains))); 959da9f80fbSSerhii Popovych 9603f5dabceSDavid Gibson _FDT(fdt_setprop_cell(fdt, rtas, "rtas-error-log-max", 9613f5dabceSDavid Gibson RTAS_ERROR_LOG_MAX)); 9623f5dabceSDavid Gibson _FDT(fdt_setprop_cell(fdt, rtas, "rtas-event-scan-rate", 9633f5dabceSDavid Gibson RTAS_EVENT_SCAN_RATE)); 9643f5dabceSDavid Gibson 9654f441474SDavid Gibson g_assert(msi_nonbroken); 9663f5dabceSDavid Gibson _FDT(fdt_setprop(fdt, rtas, "ibm,change-msix-capable", NULL, 0)); 9673f5dabceSDavid Gibson 9683f5dabceSDavid Gibson /* 9693f5dabceSDavid Gibson * According to PAPR, rtas ibm,os-term does not guarantee a return 9703f5dabceSDavid Gibson * back to the guest cpu. 9713f5dabceSDavid Gibson * 9723f5dabceSDavid Gibson * While an additional ibm,extended-os-term property indicates 9733f5dabceSDavid Gibson * that rtas call return will always occur. Set this property. 
9743f5dabceSDavid Gibson */ 9753f5dabceSDavid Gibson _FDT(fdt_setprop(fdt, rtas, "ibm,extended-os-term", NULL, 0)); 9763f5dabceSDavid Gibson 9773f5dabceSDavid Gibson _FDT(fdt_setprop(fdt, rtas, "ibm,lrdr-capacity", 9783f5dabceSDavid Gibson lrdr_capacity, sizeof(lrdr_capacity))); 9793f5dabceSDavid Gibson 9803f5dabceSDavid Gibson spapr_dt_rtas_tokens(fdt, rtas); 9813f5dabceSDavid Gibson } 9823f5dabceSDavid Gibson 983db592b5bSCédric Le Goater /* 984db592b5bSCédric Le Goater * Prepare ibm,arch-vec-5-platform-support, which indicates the MMU 985db592b5bSCédric Le Goater * and the XIVE features that the guest may request and thus the valid 986db592b5bSCédric Le Goater * values for bytes 23..26 of option vector 5: 987db592b5bSCédric Le Goater */ 988ce2918cbSDavid Gibson static void spapr_dt_ov5_platform_support(SpaprMachineState *spapr, void *fdt, 989db592b5bSCédric Le Goater int chosen) 9909fb4541fSSam Bobroff { 991545d6e2bSSuraj Jitindar Singh PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu); 992545d6e2bSSuraj Jitindar Singh 993f2b14e3aSCédric Le Goater char val[2 * 4] = { 994ca62823bSDavid Gibson 23, 0x00, /* XICS / XIVE mode */ 9959fb4541fSSam Bobroff 24, 0x00, /* Hash/Radix, filled in below. */ 9969fb4541fSSam Bobroff 25, 0x00, /* Hash options: Segment Tables == no, GTSE == no. */ 9979fb4541fSSam Bobroff 26, 0x40, /* Radix options: GTSE == yes. 
*/ 9989fb4541fSSam Bobroff }; 9999fb4541fSSam Bobroff 1000ca62823bSDavid Gibson if (spapr->irq->xics && spapr->irq->xive) { 1001ca62823bSDavid Gibson val[1] = SPAPR_OV5_XIVE_BOTH; 1002ca62823bSDavid Gibson } else if (spapr->irq->xive) { 1003ca62823bSDavid Gibson val[1] = SPAPR_OV5_XIVE_EXPLOIT; 1004ca62823bSDavid Gibson } else { 1005ca62823bSDavid Gibson assert(spapr->irq->xics); 1006ca62823bSDavid Gibson val[1] = SPAPR_OV5_XIVE_LEGACY; 1007ca62823bSDavid Gibson } 1008ca62823bSDavid Gibson 10097abd43baSSuraj Jitindar Singh if (!ppc_check_compat(first_ppc_cpu, CPU_POWERPC_LOGICAL_3_00, 0, 10107abd43baSSuraj Jitindar Singh first_ppc_cpu->compat_pvr)) { 1011db592b5bSCédric Le Goater /* 1012db592b5bSCédric Le Goater * If we're in a pre POWER9 compat mode then the guest should 1013db592b5bSCédric Le Goater * do hash and use the legacy interrupt mode 1014db592b5bSCédric Le Goater */ 1015ca62823bSDavid Gibson val[1] = SPAPR_OV5_XIVE_LEGACY; /* XICS */ 10167abd43baSSuraj Jitindar Singh val[3] = 0x00; /* Hash */ 10177abd43baSSuraj Jitindar Singh } else if (kvm_enabled()) { 10189fb4541fSSam Bobroff if (kvmppc_has_cap_mmu_radix() && kvmppc_has_cap_mmu_hash_v3()) { 1019f2b14e3aSCédric Le Goater val[3] = 0x80; /* OV5_MMU_BOTH */ 10209fb4541fSSam Bobroff } else if (kvmppc_has_cap_mmu_radix()) { 1021f2b14e3aSCédric Le Goater val[3] = 0x40; /* OV5_MMU_RADIX_300 */ 10229fb4541fSSam Bobroff } else { 1023f2b14e3aSCédric Le Goater val[3] = 0x00; /* Hash */ 10249fb4541fSSam Bobroff } 10259fb4541fSSam Bobroff } else { 10267abd43baSSuraj Jitindar Singh /* V3 MMU supports both hash and radix in tcg (with dynamic switching) */ 1027f2b14e3aSCédric Le Goater val[3] = 0xC0; 1028545d6e2bSSuraj Jitindar Singh } 10299fb4541fSSam Bobroff _FDT(fdt_setprop(fdt, chosen, "ibm,arch-vec-5-platform-support", 10309fb4541fSSam Bobroff val, sizeof(val))); 10319fb4541fSSam Bobroff } 10329fb4541fSSam Bobroff 1033ce2918cbSDavid Gibson static void spapr_dt_chosen(SpaprMachineState *spapr, void *fdt) 
10347c866c6aSDavid Gibson { 10357c866c6aSDavid Gibson MachineState *machine = MACHINE(spapr); 10366c3829a2SAlexey Kardashevskiy SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine); 10377c866c6aSDavid Gibson int chosen; 10387c866c6aSDavid Gibson const char *boot_device = machine->boot_order; 10397c866c6aSDavid Gibson char *stdout_path = spapr_vio_stdout_path(spapr->vio_bus); 10407c866c6aSDavid Gibson size_t cb = 0; 1041907aac2fSMark Cave-Ayland char *bootlist = get_boot_devices_list(&cb); 10427c866c6aSDavid Gibson 10437c866c6aSDavid Gibson _FDT(chosen = fdt_add_subnode(fdt, 0, "chosen")); 10447c866c6aSDavid Gibson 10455ced7895SAlexey Kardashevskiy if (machine->kernel_cmdline && machine->kernel_cmdline[0]) { 10465ced7895SAlexey Kardashevskiy _FDT(fdt_setprop_string(fdt, chosen, "bootargs", 10475ced7895SAlexey Kardashevskiy machine->kernel_cmdline)); 10485ced7895SAlexey Kardashevskiy } 10495ced7895SAlexey Kardashevskiy if (spapr->initrd_size) { 10507c866c6aSDavid Gibson _FDT(fdt_setprop_cell(fdt, chosen, "linux,initrd-start", 10517c866c6aSDavid Gibson spapr->initrd_base)); 10527c866c6aSDavid Gibson _FDT(fdt_setprop_cell(fdt, chosen, "linux,initrd-end", 10537c866c6aSDavid Gibson spapr->initrd_base + spapr->initrd_size)); 10545ced7895SAlexey Kardashevskiy } 10557c866c6aSDavid Gibson 10567c866c6aSDavid Gibson if (spapr->kernel_size) { 10577c866c6aSDavid Gibson uint64_t kprop[2] = { cpu_to_be64(KERNEL_LOAD_ADDR), 10587c866c6aSDavid Gibson cpu_to_be64(spapr->kernel_size) }; 10597c866c6aSDavid Gibson 10607c866c6aSDavid Gibson _FDT(fdt_setprop(fdt, chosen, "qemu,boot-kernel", 10617c866c6aSDavid Gibson &kprop, sizeof(kprop))); 10627c866c6aSDavid Gibson if (spapr->kernel_le) { 10637c866c6aSDavid Gibson _FDT(fdt_setprop(fdt, chosen, "qemu,boot-kernel-le", NULL, 0)); 10647c866c6aSDavid Gibson } 10657c866c6aSDavid Gibson } 10667c866c6aSDavid Gibson if (boot_menu) { 10677c866c6aSDavid Gibson _FDT((fdt_setprop_cell(fdt, chosen, "qemu,boot-menu", boot_menu))); 
10687c866c6aSDavid Gibson } 10697c866c6aSDavid Gibson _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-width", graphic_width)); 10707c866c6aSDavid Gibson _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-height", graphic_height)); 10717c866c6aSDavid Gibson _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-depth", graphic_depth)); 10727c866c6aSDavid Gibson 10737c866c6aSDavid Gibson if (cb && bootlist) { 10747c866c6aSDavid Gibson int i; 10757c866c6aSDavid Gibson 10767c866c6aSDavid Gibson for (i = 0; i < cb; i++) { 10777c866c6aSDavid Gibson if (bootlist[i] == '\n') { 10787c866c6aSDavid Gibson bootlist[i] = ' '; 10797c866c6aSDavid Gibson } 10807c866c6aSDavid Gibson } 10817c866c6aSDavid Gibson _FDT(fdt_setprop_string(fdt, chosen, "qemu,boot-list", bootlist)); 10827c866c6aSDavid Gibson } 10837c866c6aSDavid Gibson 10847c866c6aSDavid Gibson if (boot_device && strlen(boot_device)) { 10857c866c6aSDavid Gibson _FDT(fdt_setprop_string(fdt, chosen, "qemu,boot-device", boot_device)); 10867c866c6aSDavid Gibson } 10877c866c6aSDavid Gibson 10887c866c6aSDavid Gibson if (!spapr->has_graphics && stdout_path) { 108990ee4e01SNikunj A Dadhania /* 109090ee4e01SNikunj A Dadhania * "linux,stdout-path" and "stdout" properties are deprecated by linux 109190ee4e01SNikunj A Dadhania * kernel. New platforms should only use the "stdout-path" property. Set 109290ee4e01SNikunj A Dadhania * the new property and continue using older property to remain 109390ee4e01SNikunj A Dadhania * compatible with the existing firmware. 
109490ee4e01SNikunj A Dadhania */ 10957c866c6aSDavid Gibson _FDT(fdt_setprop_string(fdt, chosen, "linux,stdout-path", stdout_path)); 109690ee4e01SNikunj A Dadhania _FDT(fdt_setprop_string(fdt, chosen, "stdout-path", stdout_path)); 10977c866c6aSDavid Gibson } 10987c866c6aSDavid Gibson 10996c3829a2SAlexey Kardashevskiy /* We can deal with BAR reallocation just fine, advertise it to the guest */ 11006c3829a2SAlexey Kardashevskiy if (smc->linux_pci_probe) { 11016c3829a2SAlexey Kardashevskiy _FDT(fdt_setprop_cell(fdt, chosen, "linux,pci-probe-only", 0)); 11026c3829a2SAlexey Kardashevskiy } 11036c3829a2SAlexey Kardashevskiy 1104db592b5bSCédric Le Goater spapr_dt_ov5_platform_support(spapr, fdt, chosen); 11059fb4541fSSam Bobroff 11067c866c6aSDavid Gibson g_free(stdout_path); 11077c866c6aSDavid Gibson g_free(bootlist); 11087c866c6aSDavid Gibson } 11097c866c6aSDavid Gibson 1110ce2918cbSDavid Gibson static void spapr_dt_hypervisor(SpaprMachineState *spapr, void *fdt) 1111fca5f2dcSDavid Gibson { 1112fca5f2dcSDavid Gibson /* The /hypervisor node isn't in PAPR - this is a hack to allow PR 1113fca5f2dcSDavid Gibson * KVM to work under pHyp with some guest co-operation */ 1114fca5f2dcSDavid Gibson int hypervisor; 1115fca5f2dcSDavid Gibson uint8_t hypercall[16]; 1116fca5f2dcSDavid Gibson 1117fca5f2dcSDavid Gibson _FDT(hypervisor = fdt_add_subnode(fdt, 0, "hypervisor")); 1118fca5f2dcSDavid Gibson /* indicate KVM hypercall interface */ 1119fca5f2dcSDavid Gibson _FDT(fdt_setprop_string(fdt, hypervisor, "compatible", "linux,kvm")); 1120fca5f2dcSDavid Gibson if (kvmppc_has_cap_fixup_hcalls()) { 1121fca5f2dcSDavid Gibson /* 1122fca5f2dcSDavid Gibson * Older KVM versions with older guest kernels were broken 1123fca5f2dcSDavid Gibson * with the magic page, don't allow the guest to map it. 
1124fca5f2dcSDavid Gibson */ 1125fca5f2dcSDavid Gibson if (!kvmppc_get_hypercall(first_cpu->env_ptr, hypercall, 1126fca5f2dcSDavid Gibson sizeof(hypercall))) { 1127fca5f2dcSDavid Gibson _FDT(fdt_setprop(fdt, hypervisor, "hcall-instructions", 1128fca5f2dcSDavid Gibson hypercall, sizeof(hypercall))); 1129fca5f2dcSDavid Gibson } 1130fca5f2dcSDavid Gibson } 1131fca5f2dcSDavid Gibson } 1132fca5f2dcSDavid Gibson 11330c21e073SDavid Gibson void *spapr_build_fdt(SpaprMachineState *spapr, bool reset, size_t space) 113453018216SPaolo Bonzini { 1135c86c1affSDaniel Henrique Barboza MachineState *machine = MACHINE(spapr); 11363c0c47e3SDavid Gibson MachineClass *mc = MACHINE_GET_CLASS(machine); 1137ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine); 11387c866c6aSDavid Gibson int ret; 113953018216SPaolo Bonzini void *fdt; 1140ce2918cbSDavid Gibson SpaprPhbState *phb; 1141398a0bd5SDavid Gibson char *buf; 114253018216SPaolo Bonzini 114397b32a6aSDavid Gibson fdt = g_malloc0(space); 114497b32a6aSDavid Gibson _FDT((fdt_create_empty_tree(fdt, space))); 114553018216SPaolo Bonzini 1146398a0bd5SDavid Gibson /* Root node */ 1147398a0bd5SDavid Gibson _FDT(fdt_setprop_string(fdt, 0, "device_type", "chrp")); 1148398a0bd5SDavid Gibson _FDT(fdt_setprop_string(fdt, 0, "model", "IBM pSeries (emulated by qemu)")); 1149398a0bd5SDavid Gibson _FDT(fdt_setprop_string(fdt, 0, "compatible", "qemu,pseries")); 1150398a0bd5SDavid Gibson 11510a794529SDavid Gibson /* Guest UUID & Name*/ 1152398a0bd5SDavid Gibson buf = qemu_uuid_unparse_strdup(&qemu_uuid); 1153398a0bd5SDavid Gibson _FDT(fdt_setprop_string(fdt, 0, "vm,uuid", buf)); 1154398a0bd5SDavid Gibson if (qemu_uuid_set) { 1155398a0bd5SDavid Gibson _FDT(fdt_setprop_string(fdt, 0, "system-id", buf)); 1156398a0bd5SDavid Gibson } 1157398a0bd5SDavid Gibson g_free(buf); 1158398a0bd5SDavid Gibson 1159398a0bd5SDavid Gibson if (qemu_get_vm_name()) { 1160398a0bd5SDavid Gibson _FDT(fdt_setprop_string(fdt, 0, "ibm,partition-name", 
1161398a0bd5SDavid Gibson qemu_get_vm_name())); 1162398a0bd5SDavid Gibson } 1163398a0bd5SDavid Gibson 11640a794529SDavid Gibson /* Host Model & Serial Number */ 11650a794529SDavid Gibson if (spapr->host_model) { 11660a794529SDavid Gibson _FDT(fdt_setprop_string(fdt, 0, "host-model", spapr->host_model)); 11670a794529SDavid Gibson } else if (smc->broken_host_serial_model && kvmppc_get_host_model(&buf)) { 11680a794529SDavid Gibson _FDT(fdt_setprop_string(fdt, 0, "host-model", buf)); 11690a794529SDavid Gibson g_free(buf); 11700a794529SDavid Gibson } 11710a794529SDavid Gibson 11720a794529SDavid Gibson if (spapr->host_serial) { 11730a794529SDavid Gibson _FDT(fdt_setprop_string(fdt, 0, "host-serial", spapr->host_serial)); 11740a794529SDavid Gibson } else if (smc->broken_host_serial_model && kvmppc_get_host_serial(&buf)) { 11750a794529SDavid Gibson _FDT(fdt_setprop_string(fdt, 0, "host-serial", buf)); 11760a794529SDavid Gibson g_free(buf); 11770a794529SDavid Gibson } 11780a794529SDavid Gibson 1179398a0bd5SDavid Gibson _FDT(fdt_setprop_cell(fdt, 0, "#address-cells", 2)); 1180398a0bd5SDavid Gibson _FDT(fdt_setprop_cell(fdt, 0, "#size-cells", 2)); 118153018216SPaolo Bonzini 1182fc7e0765SDavid Gibson /* /interrupt controller */ 118305289273SDavid Gibson spapr_irq_dt(spapr, spapr_max_server_number(spapr), fdt, PHANDLE_INTC); 1184fc7e0765SDavid Gibson 1185e8f986fcSBharata B Rao ret = spapr_populate_memory(spapr, fdt); 1186e8f986fcSBharata B Rao if (ret < 0) { 1187ce9863b7SCédric Le Goater error_report("couldn't setup memory nodes in fdt"); 1188e8f986fcSBharata B Rao exit(1); 118953018216SPaolo Bonzini } 119053018216SPaolo Bonzini 1191bf5a6696SDavid Gibson /* /vdevice */ 1192bf5a6696SDavid Gibson spapr_dt_vdevice(spapr->vio_bus, fdt); 119353018216SPaolo Bonzini 11944d9392beSThomas Huth if (object_resolve_path_type("", TYPE_SPAPR_RNG, NULL)) { 11954d9392beSThomas Huth ret = spapr_rng_populate_dt(fdt); 11964d9392beSThomas Huth if (ret < 0) { 1197ce9863b7SCédric Le Goater 
error_report("could not set up rng device in the fdt"); 11984d9392beSThomas Huth exit(1); 11994d9392beSThomas Huth } 12004d9392beSThomas Huth } 12014d9392beSThomas Huth 120253018216SPaolo Bonzini QLIST_FOREACH(phb, &spapr->phbs, list) { 12038cbe71ecSDavid Gibson ret = spapr_dt_phb(spapr, phb, PHANDLE_INTC, fdt, NULL); 120453018216SPaolo Bonzini if (ret < 0) { 1205da34fed7SThomas Huth error_report("couldn't setup PCI devices in fdt"); 120653018216SPaolo Bonzini exit(1); 120753018216SPaolo Bonzini } 1208da34fed7SThomas Huth } 120953018216SPaolo Bonzini 12100da6f3feSBharata B Rao /* cpus */ 12110da6f3feSBharata B Rao spapr_populate_cpus_dt_node(fdt, spapr); 121253018216SPaolo Bonzini 1213c20d332aSBharata B Rao if (smc->dr_lmb_enabled) { 12149e7d38e8SDavid Gibson _FDT(spapr_dt_drc(fdt, 0, NULL, SPAPR_DR_CONNECTOR_TYPE_LMB)); 1215c20d332aSBharata B Rao } 1216c20d332aSBharata B Rao 1217c5514d0eSIgor Mammedov if (mc->has_hotpluggable_cpus) { 1218af81cf32SBharata B Rao int offset = fdt_path_offset(fdt, "/cpus"); 12199e7d38e8SDavid Gibson ret = spapr_dt_drc(fdt, offset, NULL, SPAPR_DR_CONNECTOR_TYPE_CPU); 1220af81cf32SBharata B Rao if (ret < 0) { 1221af81cf32SBharata B Rao error_report("Couldn't set up CPU DR device tree properties"); 1222af81cf32SBharata B Rao exit(1); 1223af81cf32SBharata B Rao } 1224af81cf32SBharata B Rao } 1225af81cf32SBharata B Rao 1226ffb1e275SDavid Gibson /* /event-sources */ 1227ffbb1705SMichael Roth spapr_dt_events(spapr, fdt); 1228ffb1e275SDavid Gibson 12293f5dabceSDavid Gibson /* /rtas */ 12303f5dabceSDavid Gibson spapr_dt_rtas(spapr, fdt); 12313f5dabceSDavid Gibson 12327c866c6aSDavid Gibson /* /chosen */ 1233a49f62b9SAlexey Kardashevskiy if (reset) { 12347c866c6aSDavid Gibson spapr_dt_chosen(spapr, fdt); 1235a49f62b9SAlexey Kardashevskiy } 1236cf6e5223SDavid Gibson 1237fca5f2dcSDavid Gibson /* /hypervisor */ 1238fca5f2dcSDavid Gibson if (kvm_enabled()) { 1239fca5f2dcSDavid Gibson spapr_dt_hypervisor(spapr, fdt); 1240fca5f2dcSDavid Gibson } 
1241fca5f2dcSDavid Gibson 1242cf6e5223SDavid Gibson /* Build memory reserve map */ 1243a49f62b9SAlexey Kardashevskiy if (reset) { 1244cf6e5223SDavid Gibson if (spapr->kernel_size) { 1245cf6e5223SDavid Gibson _FDT((fdt_add_mem_rsv(fdt, KERNEL_LOAD_ADDR, spapr->kernel_size))); 1246cf6e5223SDavid Gibson } 1247cf6e5223SDavid Gibson if (spapr->initrd_size) { 1248a49f62b9SAlexey Kardashevskiy _FDT((fdt_add_mem_rsv(fdt, spapr->initrd_base, 1249a49f62b9SAlexey Kardashevskiy spapr->initrd_size))); 1250a49f62b9SAlexey Kardashevskiy } 1251cf6e5223SDavid Gibson } 1252cf6e5223SDavid Gibson 12536787d27bSMichael Roth /* ibm,client-architecture-support updates */ 12546787d27bSMichael Roth ret = spapr_dt_cas_updates(spapr, fdt, spapr->ov5_cas); 12556787d27bSMichael Roth if (ret < 0) { 12566787d27bSMichael Roth error_report("couldn't setup CAS properties fdt"); 12576787d27bSMichael Roth exit(1); 12586787d27bSMichael Roth } 12596787d27bSMichael Roth 12603998ccd0SNathan Fontenot if (smc->dr_phb_enabled) { 12619e7d38e8SDavid Gibson ret = spapr_dt_drc(fdt, 0, NULL, SPAPR_DR_CONNECTOR_TYPE_PHB); 12623998ccd0SNathan Fontenot if (ret < 0) { 12633998ccd0SNathan Fontenot error_report("Couldn't set up PHB DR device tree properties"); 12643998ccd0SNathan Fontenot exit(1); 12653998ccd0SNathan Fontenot } 12663998ccd0SNathan Fontenot } 12673998ccd0SNathan Fontenot 1268997b6cfcSDavid Gibson return fdt; 126953018216SPaolo Bonzini } 127053018216SPaolo Bonzini 127153018216SPaolo Bonzini static uint64_t translate_kernel_address(void *opaque, uint64_t addr) 127253018216SPaolo Bonzini { 127353018216SPaolo Bonzini return (addr & 0x0fffffff) + KERNEL_LOAD_ADDR; 127453018216SPaolo Bonzini } 127553018216SPaolo Bonzini 12761d1be34dSDavid Gibson static void emulate_spapr_hypercall(PPCVirtualHypervisor *vhyp, 12771d1be34dSDavid Gibson PowerPCCPU *cpu) 127853018216SPaolo Bonzini { 127953018216SPaolo Bonzini CPUPPCState *env = &cpu->env; 128053018216SPaolo Bonzini 12818d04fb55SJan Kiszka /* The TCG path should 
also be holding the BQL at this point */ 12828d04fb55SJan Kiszka g_assert(qemu_mutex_iothread_locked()); 12838d04fb55SJan Kiszka 128453018216SPaolo Bonzini if (msr_pr) { 128553018216SPaolo Bonzini hcall_dprintf("Hypercall made with MSR[PR]=1\n"); 128653018216SPaolo Bonzini env->gpr[3] = H_PRIVILEGE; 128753018216SPaolo Bonzini } else { 128853018216SPaolo Bonzini env->gpr[3] = spapr_hypercall(cpu, env->gpr[3], &env->gpr[4]); 128953018216SPaolo Bonzini } 129053018216SPaolo Bonzini } 129153018216SPaolo Bonzini 129200fd075eSBenjamin Herrenschmidt struct LPCRSyncState { 129300fd075eSBenjamin Herrenschmidt target_ulong value; 129400fd075eSBenjamin Herrenschmidt target_ulong mask; 129500fd075eSBenjamin Herrenschmidt }; 129600fd075eSBenjamin Herrenschmidt 129700fd075eSBenjamin Herrenschmidt static void do_lpcr_sync(CPUState *cs, run_on_cpu_data arg) 129800fd075eSBenjamin Herrenschmidt { 129900fd075eSBenjamin Herrenschmidt struct LPCRSyncState *s = arg.host_ptr; 130000fd075eSBenjamin Herrenschmidt PowerPCCPU *cpu = POWERPC_CPU(cs); 130100fd075eSBenjamin Herrenschmidt CPUPPCState *env = &cpu->env; 130200fd075eSBenjamin Herrenschmidt target_ulong lpcr; 130300fd075eSBenjamin Herrenschmidt 130400fd075eSBenjamin Herrenschmidt cpu_synchronize_state(cs); 130500fd075eSBenjamin Herrenschmidt lpcr = env->spr[SPR_LPCR]; 130600fd075eSBenjamin Herrenschmidt lpcr &= ~s->mask; 130700fd075eSBenjamin Herrenschmidt lpcr |= s->value; 130800fd075eSBenjamin Herrenschmidt ppc_store_lpcr(cpu, lpcr); 130900fd075eSBenjamin Herrenschmidt } 131000fd075eSBenjamin Herrenschmidt 131100fd075eSBenjamin Herrenschmidt void spapr_set_all_lpcrs(target_ulong value, target_ulong mask) 131200fd075eSBenjamin Herrenschmidt { 131300fd075eSBenjamin Herrenschmidt CPUState *cs; 131400fd075eSBenjamin Herrenschmidt struct LPCRSyncState s = { 131500fd075eSBenjamin Herrenschmidt .value = value, 131600fd075eSBenjamin Herrenschmidt .mask = mask 131700fd075eSBenjamin Herrenschmidt }; 131800fd075eSBenjamin Herrenschmidt 
CPU_FOREACH(cs) { 131900fd075eSBenjamin Herrenschmidt run_on_cpu(cs, do_lpcr_sync, RUN_ON_CPU_HOST_PTR(&s)); 132000fd075eSBenjamin Herrenschmidt } 132100fd075eSBenjamin Herrenschmidt } 132200fd075eSBenjamin Herrenschmidt 132379825f4dSBenjamin Herrenschmidt static void spapr_get_pate(PPCVirtualHypervisor *vhyp, ppc_v3_pate_t *entry) 13249861bb3eSSuraj Jitindar Singh { 1325ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(vhyp); 13269861bb3eSSuraj Jitindar Singh 132779825f4dSBenjamin Herrenschmidt /* Copy PATE1:GR into PATE0:HR */ 132879825f4dSBenjamin Herrenschmidt entry->dw0 = spapr->patb_entry & PATE0_HR; 132979825f4dSBenjamin Herrenschmidt entry->dw1 = spapr->patb_entry; 13309861bb3eSSuraj Jitindar Singh } 13319861bb3eSSuraj Jitindar Singh 1332e6b8fd24SSamuel Mendoza-Jonas #define HPTE(_table, _i) (void *)(((uint64_t *)(_table)) + ((_i) * 2)) 1333e6b8fd24SSamuel Mendoza-Jonas #define HPTE_VALID(_hpte) (tswap64(*((uint64_t *)(_hpte))) & HPTE64_V_VALID) 1334e6b8fd24SSamuel Mendoza-Jonas #define HPTE_DIRTY(_hpte) (tswap64(*((uint64_t *)(_hpte))) & HPTE64_V_HPTE_DIRTY) 1335e6b8fd24SSamuel Mendoza-Jonas #define CLEAN_HPTE(_hpte) ((*(uint64_t *)(_hpte)) &= tswap64(~HPTE64_V_HPTE_DIRTY)) 1336e6b8fd24SSamuel Mendoza-Jonas #define DIRTY_HPTE(_hpte) ((*(uint64_t *)(_hpte)) |= tswap64(HPTE64_V_HPTE_DIRTY)) 1337e6b8fd24SSamuel Mendoza-Jonas 1338715c5407SDavid Gibson /* 1339715c5407SDavid Gibson * Get the fd to access the kernel htab, re-opening it if necessary 1340715c5407SDavid Gibson */ 1341ce2918cbSDavid Gibson static int get_htab_fd(SpaprMachineState *spapr) 1342715c5407SDavid Gibson { 134314b0d748SGreg Kurz Error *local_err = NULL; 134414b0d748SGreg Kurz 1345715c5407SDavid Gibson if (spapr->htab_fd >= 0) { 1346715c5407SDavid Gibson return spapr->htab_fd; 1347715c5407SDavid Gibson } 1348715c5407SDavid Gibson 134914b0d748SGreg Kurz spapr->htab_fd = kvmppc_get_htab_fd(false, 0, &local_err); 1350715c5407SDavid Gibson if (spapr->htab_fd < 0) { 135114b0d748SGreg 
Kurz error_report_err(local_err); 1352715c5407SDavid Gibson } 1353715c5407SDavid Gibson 1354715c5407SDavid Gibson return spapr->htab_fd; 1355715c5407SDavid Gibson } 1356715c5407SDavid Gibson 1357ce2918cbSDavid Gibson void close_htab_fd(SpaprMachineState *spapr) 1358715c5407SDavid Gibson { 1359715c5407SDavid Gibson if (spapr->htab_fd >= 0) { 1360715c5407SDavid Gibson close(spapr->htab_fd); 1361715c5407SDavid Gibson } 1362715c5407SDavid Gibson spapr->htab_fd = -1; 1363715c5407SDavid Gibson } 1364715c5407SDavid Gibson 1365e57ca75cSDavid Gibson static hwaddr spapr_hpt_mask(PPCVirtualHypervisor *vhyp) 1366e57ca75cSDavid Gibson { 1367ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(vhyp); 1368e57ca75cSDavid Gibson 1369e57ca75cSDavid Gibson return HTAB_SIZE(spapr) / HASH_PTEG_SIZE_64 - 1; 1370e57ca75cSDavid Gibson } 1371e57ca75cSDavid Gibson 13721ec26c75SGreg Kurz static target_ulong spapr_encode_hpt_for_kvm_pr(PPCVirtualHypervisor *vhyp) 13731ec26c75SGreg Kurz { 1374ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(vhyp); 13751ec26c75SGreg Kurz 13761ec26c75SGreg Kurz assert(kvm_enabled()); 13771ec26c75SGreg Kurz 13781ec26c75SGreg Kurz if (!spapr->htab) { 13791ec26c75SGreg Kurz return 0; 13801ec26c75SGreg Kurz } 13811ec26c75SGreg Kurz 13821ec26c75SGreg Kurz return (target_ulong)(uintptr_t)spapr->htab | (spapr->htab_shift - 18); 13831ec26c75SGreg Kurz } 13841ec26c75SGreg Kurz 1385e57ca75cSDavid Gibson static const ppc_hash_pte64_t *spapr_map_hptes(PPCVirtualHypervisor *vhyp, 1386e57ca75cSDavid Gibson hwaddr ptex, int n) 1387e57ca75cSDavid Gibson { 1388ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(vhyp); 1389e57ca75cSDavid Gibson hwaddr pte_offset = ptex * HASH_PTE_SIZE_64; 1390e57ca75cSDavid Gibson 1391e57ca75cSDavid Gibson if (!spapr->htab) { 1392e57ca75cSDavid Gibson /* 1393e57ca75cSDavid Gibson * HTAB is controlled by KVM. 
Fetch into temporary buffer 1394e57ca75cSDavid Gibson */ 1395e57ca75cSDavid Gibson ppc_hash_pte64_t *hptes = g_malloc(n * HASH_PTE_SIZE_64); 1396e57ca75cSDavid Gibson kvmppc_read_hptes(hptes, ptex, n); 1397e57ca75cSDavid Gibson return hptes; 1398e57ca75cSDavid Gibson } 1399e57ca75cSDavid Gibson 1400e57ca75cSDavid Gibson /* 1401e57ca75cSDavid Gibson * HTAB is controlled by QEMU. Just point to the internally 1402e57ca75cSDavid Gibson * accessible PTEG. 1403e57ca75cSDavid Gibson */ 1404e57ca75cSDavid Gibson return (const ppc_hash_pte64_t *)(spapr->htab + pte_offset); 1405e57ca75cSDavid Gibson } 1406e57ca75cSDavid Gibson 1407e57ca75cSDavid Gibson static void spapr_unmap_hptes(PPCVirtualHypervisor *vhyp, 1408e57ca75cSDavid Gibson const ppc_hash_pte64_t *hptes, 1409e57ca75cSDavid Gibson hwaddr ptex, int n) 1410e57ca75cSDavid Gibson { 1411ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(vhyp); 1412e57ca75cSDavid Gibson 1413e57ca75cSDavid Gibson if (!spapr->htab) { 1414e57ca75cSDavid Gibson g_free((void *)hptes); 1415e57ca75cSDavid Gibson } 1416e57ca75cSDavid Gibson 1417e57ca75cSDavid Gibson /* Nothing to do for qemu managed HPT */ 1418e57ca75cSDavid Gibson } 1419e57ca75cSDavid Gibson 1420a2dd4e83SBenjamin Herrenschmidt void spapr_store_hpte(PowerPCCPU *cpu, hwaddr ptex, 1421e57ca75cSDavid Gibson uint64_t pte0, uint64_t pte1) 1422e57ca75cSDavid Gibson { 1423a2dd4e83SBenjamin Herrenschmidt SpaprMachineState *spapr = SPAPR_MACHINE(cpu->vhyp); 1424e57ca75cSDavid Gibson hwaddr offset = ptex * HASH_PTE_SIZE_64; 1425e57ca75cSDavid Gibson 1426e57ca75cSDavid Gibson if (!spapr->htab) { 1427e57ca75cSDavid Gibson kvmppc_write_hpte(ptex, pte0, pte1); 1428e57ca75cSDavid Gibson } else { 14293054b0caSBenjamin Herrenschmidt if (pte0 & HPTE64_V_VALID) { 1430e57ca75cSDavid Gibson stq_p(spapr->htab + offset + HASH_PTE_SIZE_64 / 2, pte1); 14313054b0caSBenjamin Herrenschmidt /* 14323054b0caSBenjamin Herrenschmidt * When setting valid, we write PTE1 first. 
This ensures 14333054b0caSBenjamin Herrenschmidt * proper synchronization with the reading code in 14343054b0caSBenjamin Herrenschmidt * ppc_hash64_pteg_search() 14353054b0caSBenjamin Herrenschmidt */ 14363054b0caSBenjamin Herrenschmidt smp_wmb(); 14373054b0caSBenjamin Herrenschmidt stq_p(spapr->htab + offset, pte0); 14383054b0caSBenjamin Herrenschmidt } else { 14393054b0caSBenjamin Herrenschmidt stq_p(spapr->htab + offset, pte0); 14403054b0caSBenjamin Herrenschmidt /* 14413054b0caSBenjamin Herrenschmidt * When clearing it we set PTE0 first. This ensures proper 14423054b0caSBenjamin Herrenschmidt * synchronization with the reading code in 14433054b0caSBenjamin Herrenschmidt * ppc_hash64_pteg_search() 14443054b0caSBenjamin Herrenschmidt */ 14453054b0caSBenjamin Herrenschmidt smp_wmb(); 14463054b0caSBenjamin Herrenschmidt stq_p(spapr->htab + offset + HASH_PTE_SIZE_64 / 2, pte1); 14473054b0caSBenjamin Herrenschmidt } 1448e57ca75cSDavid Gibson } 1449e57ca75cSDavid Gibson } 1450e57ca75cSDavid Gibson 1451a2dd4e83SBenjamin Herrenschmidt static void spapr_hpte_set_c(PPCVirtualHypervisor *vhyp, hwaddr ptex, 1452a2dd4e83SBenjamin Herrenschmidt uint64_t pte1) 1453a2dd4e83SBenjamin Herrenschmidt { 1454a2dd4e83SBenjamin Herrenschmidt hwaddr offset = ptex * HASH_PTE_SIZE_64 + 15; 1455a2dd4e83SBenjamin Herrenschmidt SpaprMachineState *spapr = SPAPR_MACHINE(vhyp); 1456a2dd4e83SBenjamin Herrenschmidt 1457a2dd4e83SBenjamin Herrenschmidt if (!spapr->htab) { 1458a2dd4e83SBenjamin Herrenschmidt /* There should always be a hash table when this is called */ 1459a2dd4e83SBenjamin Herrenschmidt error_report("spapr_hpte_set_c called with no hash table !"); 1460a2dd4e83SBenjamin Herrenschmidt return; 1461a2dd4e83SBenjamin Herrenschmidt } 1462a2dd4e83SBenjamin Herrenschmidt 1463a2dd4e83SBenjamin Herrenschmidt /* The HW performs a non-atomic byte update */ 1464a2dd4e83SBenjamin Herrenschmidt stb_p(spapr->htab + offset, (pte1 & 0xff) | 0x80); 1465a2dd4e83SBenjamin Herrenschmidt } 
1466a2dd4e83SBenjamin Herrenschmidt 1467a2dd4e83SBenjamin Herrenschmidt static void spapr_hpte_set_r(PPCVirtualHypervisor *vhyp, hwaddr ptex, 1468a2dd4e83SBenjamin Herrenschmidt uint64_t pte1) 1469a2dd4e83SBenjamin Herrenschmidt { 1470a2dd4e83SBenjamin Herrenschmidt hwaddr offset = ptex * HASH_PTE_SIZE_64 + 14; 1471a2dd4e83SBenjamin Herrenschmidt SpaprMachineState *spapr = SPAPR_MACHINE(vhyp); 1472a2dd4e83SBenjamin Herrenschmidt 1473a2dd4e83SBenjamin Herrenschmidt if (!spapr->htab) { 1474a2dd4e83SBenjamin Herrenschmidt /* There should always be a hash table when this is called */ 1475a2dd4e83SBenjamin Herrenschmidt error_report("spapr_hpte_set_r called with no hash table !"); 1476a2dd4e83SBenjamin Herrenschmidt return; 1477a2dd4e83SBenjamin Herrenschmidt } 1478a2dd4e83SBenjamin Herrenschmidt 1479a2dd4e83SBenjamin Herrenschmidt /* The HW performs a non-atomic byte update */ 1480a2dd4e83SBenjamin Herrenschmidt stb_p(spapr->htab + offset, ((pte1 >> 8) & 0xff) | 0x01); 1481a2dd4e83SBenjamin Herrenschmidt } 1482a2dd4e83SBenjamin Herrenschmidt 14830b0b8310SDavid Gibson int spapr_hpt_shift_for_ramsize(uint64_t ramsize) 14848dfe8e7fSDavid Gibson { 14858dfe8e7fSDavid Gibson int shift; 14868dfe8e7fSDavid Gibson 14878dfe8e7fSDavid Gibson /* We aim for a hash table of size 1/128 the size of RAM (rounded 14888dfe8e7fSDavid Gibson * up). 
The PAPR recommendation is actually 1/64 of RAM size, but 14898dfe8e7fSDavid Gibson * that's much more than is needed for Linux guests */ 14908dfe8e7fSDavid Gibson shift = ctz64(pow2ceil(ramsize)) - 7; 14918dfe8e7fSDavid Gibson shift = MAX(shift, 18); /* Minimum architected size */ 14928dfe8e7fSDavid Gibson shift = MIN(shift, 46); /* Maximum architected size */ 14938dfe8e7fSDavid Gibson return shift; 14948dfe8e7fSDavid Gibson } 14958dfe8e7fSDavid Gibson 1496ce2918cbSDavid Gibson void spapr_free_hpt(SpaprMachineState *spapr) 149706ec79e8SBharata B Rao { 149806ec79e8SBharata B Rao g_free(spapr->htab); 149906ec79e8SBharata B Rao spapr->htab = NULL; 150006ec79e8SBharata B Rao spapr->htab_shift = 0; 150106ec79e8SBharata B Rao close_htab_fd(spapr); 150206ec79e8SBharata B Rao } 150306ec79e8SBharata B Rao 1504ce2918cbSDavid Gibson void spapr_reallocate_hpt(SpaprMachineState *spapr, int shift, 1505c5f54f3eSDavid Gibson Error **errp) 150653018216SPaolo Bonzini { 1507c5f54f3eSDavid Gibson long rc; 150853018216SPaolo Bonzini 1509c5f54f3eSDavid Gibson /* Clean up any HPT info from a previous boot */ 151006ec79e8SBharata B Rao spapr_free_hpt(spapr); 151153018216SPaolo Bonzini 1512c5f54f3eSDavid Gibson rc = kvmppc_reset_htab(shift); 1513c5f54f3eSDavid Gibson if (rc < 0) { 1514c5f54f3eSDavid Gibson /* kernel-side HPT needed, but couldn't allocate one */ 1515c5f54f3eSDavid Gibson error_setg_errno(errp, errno, 1516c5f54f3eSDavid Gibson "Failed to allocate KVM HPT of order %d (try smaller maxmem?)", 1517c5f54f3eSDavid Gibson shift); 1518c5f54f3eSDavid Gibson /* This is almost certainly fatal, but if the caller really 1519c5f54f3eSDavid Gibson * wants to carry on with shift == 0, it's welcome to try */ 1520c5f54f3eSDavid Gibson } else if (rc > 0) { 1521c5f54f3eSDavid Gibson /* kernel-side HPT allocated */ 1522c5f54f3eSDavid Gibson if (rc != shift) { 1523c5f54f3eSDavid Gibson error_setg(errp, 1524c5f54f3eSDavid Gibson "Requested order %d HPT, but kernel allocated order %ld (try smaller 
maxmem?)", 1525c5f54f3eSDavid Gibson shift, rc); 15267735fedaSBharata B Rao } 15277735fedaSBharata B Rao 152853018216SPaolo Bonzini spapr->htab_shift = shift; 1529c18ad9a5SDavid Gibson spapr->htab = NULL; 1530b817772aSBharata B Rao } else { 1531c5f54f3eSDavid Gibson /* kernel-side HPT not needed, allocate in userspace instead */ 1532c5f54f3eSDavid Gibson size_t size = 1ULL << shift; 1533c5f54f3eSDavid Gibson int i; 153401a57972SSamuel Mendoza-Jonas 1535c5f54f3eSDavid Gibson spapr->htab = qemu_memalign(size, size); 1536c5f54f3eSDavid Gibson if (!spapr->htab) { 1537c5f54f3eSDavid Gibson error_setg_errno(errp, errno, 1538c5f54f3eSDavid Gibson "Could not allocate HPT of order %d", shift); 1539c5f54f3eSDavid Gibson return; 1540b817772aSBharata B Rao } 1541b817772aSBharata B Rao 1542c5f54f3eSDavid Gibson memset(spapr->htab, 0, size); 1543c5f54f3eSDavid Gibson spapr->htab_shift = shift; 1544b817772aSBharata B Rao 1545c5f54f3eSDavid Gibson for (i = 0; i < size / HASH_PTE_SIZE_64; i++) { 1546c5f54f3eSDavid Gibson DIRTY_HPTE(HPTE(spapr->htab, i)); 15477735fedaSBharata B Rao } 154853018216SPaolo Bonzini } 1549ee4d9eccSSuraj Jitindar Singh /* We're setting up a hash table, so that means we're not radix */ 1550176dcceeSSuraj Jitindar Singh spapr->patb_entry = 0; 155100fd075eSBenjamin Herrenschmidt spapr_set_all_lpcrs(0, LPCR_HR | LPCR_UPRT); 155253018216SPaolo Bonzini } 155353018216SPaolo Bonzini 1554ce2918cbSDavid Gibson void spapr_setup_hpt_and_vrma(SpaprMachineState *spapr) 1555b4db5413SSuraj Jitindar Singh { 15562772cf6bSDavid Gibson int hpt_shift; 15572772cf6bSDavid Gibson 15582772cf6bSDavid Gibson if ((spapr->resize_hpt == SPAPR_RESIZE_HPT_DISABLED) 15592772cf6bSDavid Gibson || (spapr->cas_reboot 15602772cf6bSDavid Gibson && !spapr_ovec_test(spapr->ov5_cas, OV5_HPT_RESIZE))) { 15612772cf6bSDavid Gibson hpt_shift = spapr_hpt_shift_for_ramsize(MACHINE(spapr)->maxram_size); 15622772cf6bSDavid Gibson } else { 1563768a20f3SDavid Gibson uint64_t current_ram_size; 
1564768a20f3SDavid Gibson 1565768a20f3SDavid Gibson current_ram_size = MACHINE(spapr)->ram_size + get_plugged_memory_size(); 1566768a20f3SDavid Gibson hpt_shift = spapr_hpt_shift_for_ramsize(current_ram_size); 15672772cf6bSDavid Gibson } 15682772cf6bSDavid Gibson spapr_reallocate_hpt(spapr, hpt_shift, &error_fatal); 15692772cf6bSDavid Gibson 1570b4db5413SSuraj Jitindar Singh if (spapr->vrma_adjust) { 1571c86c1affSDaniel Henrique Barboza spapr->rma_size = kvmppc_rma_size(spapr_node0_size(MACHINE(spapr)), 1572b4db5413SSuraj Jitindar Singh spapr->htab_shift); 1573b4db5413SSuraj Jitindar Singh } 1574b4db5413SSuraj Jitindar Singh } 1575b4db5413SSuraj Jitindar Singh 157682512483SGreg Kurz static int spapr_reset_drcs(Object *child, void *opaque) 157782512483SGreg Kurz { 1578ce2918cbSDavid Gibson SpaprDrc *drc = 1579ce2918cbSDavid Gibson (SpaprDrc *) object_dynamic_cast(child, 158082512483SGreg Kurz TYPE_SPAPR_DR_CONNECTOR); 158182512483SGreg Kurz 158282512483SGreg Kurz if (drc) { 158382512483SGreg Kurz spapr_drc_reset(drc); 158482512483SGreg Kurz } 158582512483SGreg Kurz 158682512483SGreg Kurz return 0; 158782512483SGreg Kurz } 158882512483SGreg Kurz 1589a0628599SLike Xu static void spapr_machine_reset(MachineState *machine) 159053018216SPaolo Bonzini { 1591ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(machine); 1592182735efSAndreas Färber PowerPCCPU *first_ppc_cpu; 1593744a928cSAlexey Kardashevskiy hwaddr fdt_addr; 1594997b6cfcSDavid Gibson void *fdt; 1595997b6cfcSDavid Gibson int rc; 1596259186a7SAndreas Färber 1597905db916SBharata B Rao kvmppc_svm_off(&error_fatal); 15989f6edd06SDavid Gibson spapr_caps_apply(spapr); 159933face6bSDavid Gibson 16001481fe5fSLaurent Vivier first_ppc_cpu = POWERPC_CPU(first_cpu); 16011481fe5fSLaurent Vivier if (kvm_enabled() && kvmppc_has_cap_mmu_radix() && 1602ad99d04cSDavid Gibson ppc_type_check_compat(machine->cpu_type, CPU_POWERPC_LOGICAL_3_00, 0, 16031481fe5fSLaurent Vivier spapr->max_compat_pvr)) { 
160479825f4dSBenjamin Herrenschmidt /* 160579825f4dSBenjamin Herrenschmidt * If using KVM with radix mode available, VCPUs can be started 1606b4db5413SSuraj Jitindar Singh * without a HPT because KVM will start them in radix mode. 160779825f4dSBenjamin Herrenschmidt * Set the GR bit in PATE so that we know there is no HPT. 160879825f4dSBenjamin Herrenschmidt */ 160979825f4dSBenjamin Herrenschmidt spapr->patb_entry = PATE1_GR; 161000fd075eSBenjamin Herrenschmidt spapr_set_all_lpcrs(LPCR_HR | LPCR_UPRT, LPCR_HR | LPCR_UPRT); 1611b4db5413SSuraj Jitindar Singh } else { 1612b4db5413SSuraj Jitindar Singh spapr_setup_hpt_and_vrma(spapr); 1613c5f54f3eSDavid Gibson } 161453018216SPaolo Bonzini 161525c9780dSDavid Gibson qemu_devices_reset(); 161625c9780dSDavid Gibson 161725c9780dSDavid Gibson /* 161879825f4dSBenjamin Herrenschmidt * If this reset wasn't generated by CAS, we should reset our 161979825f4dSBenjamin Herrenschmidt * negotiated options and start from scratch 162079825f4dSBenjamin Herrenschmidt */ 16219012a53fSGreg Kurz if (!spapr->cas_reboot) { 16229012a53fSGreg Kurz spapr_ovec_cleanup(spapr->ov5_cas); 16239012a53fSGreg Kurz spapr->ov5_cas = spapr_ovec_new(); 16249012a53fSGreg Kurz 1625ce03a193SLaurent Vivier ppc_set_compat_all(spapr->max_compat_pvr, &error_fatal); 16269012a53fSGreg Kurz } 16279012a53fSGreg Kurz 1628ec132efaSAlexey Kardashevskiy /* 1629b2e22477SCédric Le Goater * This is fixing some of the default configuration of the XIVE 1630b2e22477SCédric Le Goater * devices. To be called after the reset of the machine devices. 1631b2e22477SCédric Le Goater */ 1632b2e22477SCédric Le Goater spapr_irq_reset(spapr, &error_fatal); 1633b2e22477SCédric Le Goater 163423ff81bdSGreg Kurz /* 163523ff81bdSGreg Kurz * There is no CAS under qtest. Simulate one to please the code that 163623ff81bdSGreg Kurz * depends on spapr->ov5_cas. This is especially needed to test device 163723ff81bdSGreg Kurz * unplug, so we do that before resetting the DRCs. 
163823ff81bdSGreg Kurz */ 163923ff81bdSGreg Kurz if (qtest_enabled()) { 164023ff81bdSGreg Kurz spapr_ovec_cleanup(spapr->ov5_cas); 164123ff81bdSGreg Kurz spapr->ov5_cas = spapr_ovec_clone(spapr->ov5); 164223ff81bdSGreg Kurz } 164323ff81bdSGreg Kurz 164482512483SGreg Kurz /* DRC reset may cause a device to be unplugged. This will cause troubles 164582512483SGreg Kurz * if this device is used by another device (eg, a running vhost backend 164682512483SGreg Kurz * will crash QEMU if the DIMM holding the vring goes away). To avoid such 164782512483SGreg Kurz * situations, we reset DRCs after all devices have been reset. 164882512483SGreg Kurz */ 164982512483SGreg Kurz object_child_foreach_recursive(object_get_root(), spapr_reset_drcs, NULL); 165082512483SGreg Kurz 165156258174SDaniel Henrique Barboza spapr_clear_pending_events(spapr); 165253018216SPaolo Bonzini 1653b7d1f77aSBenjamin Herrenschmidt /* 1654b7d1f77aSBenjamin Herrenschmidt * We place the device tree and RTAS just below either the top of the RMA, 1655df269271SAlexey Kardashevskiy * or just below 2GB, whichever is lower, so that it can be 1656b7d1f77aSBenjamin Herrenschmidt * processed with 32-bit real mode code if necessary 1657b7d1f77aSBenjamin Herrenschmidt */ 1658744a928cSAlexey Kardashevskiy fdt_addr = MIN(spapr->rma_size, RTAS_MAX_ADDR) - FDT_MAX_SIZE; 1659b7d1f77aSBenjamin Herrenschmidt 166097b32a6aSDavid Gibson fdt = spapr_build_fdt(spapr, true, FDT_MAX_SIZE); 166153018216SPaolo Bonzini 1662997b6cfcSDavid Gibson rc = fdt_pack(fdt); 1663997b6cfcSDavid Gibson 1664997b6cfcSDavid Gibson /* Should only fail if we've built a corrupted tree */ 1665997b6cfcSDavid Gibson assert(rc == 0); 1666997b6cfcSDavid Gibson 1667997b6cfcSDavid Gibson /* Load the fdt */ 1668997b6cfcSDavid Gibson qemu_fdt_dumpdtb(fdt, fdt_totalsize(fdt)); 1669cae172abSDavid Gibson cpu_physical_memory_write(fdt_addr, fdt, fdt_totalsize(fdt)); 1670fea35ca4SAlexey Kardashevskiy g_free(spapr->fdt_blob); 1671fea35ca4SAlexey Kardashevskiy 
spapr->fdt_size = fdt_totalsize(fdt); 1672fea35ca4SAlexey Kardashevskiy spapr->fdt_initial_size = spapr->fdt_size; 1673fea35ca4SAlexey Kardashevskiy spapr->fdt_blob = fdt; 1674997b6cfcSDavid Gibson 167553018216SPaolo Bonzini /* Set up the entry state */ 167684369f63SDavid Gibson spapr_cpu_set_entry_state(first_ppc_cpu, SPAPR_ENTRY_POINT, fdt_addr); 1677182735efSAndreas Färber first_ppc_cpu->env.gpr[5] = 0; 167853018216SPaolo Bonzini 16796787d27bSMichael Roth spapr->cas_reboot = false; 168053018216SPaolo Bonzini } 168153018216SPaolo Bonzini 1682ce2918cbSDavid Gibson static void spapr_create_nvram(SpaprMachineState *spapr) 168353018216SPaolo Bonzini { 16842ff3de68SMarkus Armbruster DeviceState *dev = qdev_create(&spapr->vio_bus->bus, "spapr-nvram"); 16853978b863SPaolo Bonzini DriveInfo *dinfo = drive_get(IF_PFLASH, 0, 0); 168653018216SPaolo Bonzini 16873978b863SPaolo Bonzini if (dinfo) { 16886231a6daSMarkus Armbruster qdev_prop_set_drive(dev, "drive", blk_by_legacy_dinfo(dinfo), 16896231a6daSMarkus Armbruster &error_fatal); 169053018216SPaolo Bonzini } 169153018216SPaolo Bonzini 169253018216SPaolo Bonzini qdev_init_nofail(dev); 169353018216SPaolo Bonzini 1694ce2918cbSDavid Gibson spapr->nvram = (struct SpaprNvram *)dev; 169553018216SPaolo Bonzini } 169653018216SPaolo Bonzini 1697ce2918cbSDavid Gibson static void spapr_rtc_create(SpaprMachineState *spapr) 169828df36a1SDavid Gibson { 1699f6d4dca8SThomas Huth object_initialize_child(OBJECT(spapr), "rtc", 1700f6d4dca8SThomas Huth &spapr->rtc, sizeof(spapr->rtc), TYPE_SPAPR_RTC, 1701f6d4dca8SThomas Huth &error_fatal, NULL); 1702147ff807SCédric Le Goater object_property_set_bool(OBJECT(&spapr->rtc), true, "realized", 1703147ff807SCédric Le Goater &error_fatal); 1704147ff807SCédric Le Goater object_property_add_alias(OBJECT(spapr), "rtc-time", OBJECT(&spapr->rtc), 1705147ff807SCédric Le Goater "date", &error_fatal); 170628df36a1SDavid Gibson } 170728df36a1SDavid Gibson 170853018216SPaolo Bonzini /* Returns whether we want 
to use VGA or not */ 170914c6a894SDavid Gibson static bool spapr_vga_init(PCIBus *pci_bus, Error **errp) 171053018216SPaolo Bonzini { 171153018216SPaolo Bonzini switch (vga_interface_type) { 171253018216SPaolo Bonzini case VGA_NONE: 17137effdaa3SMark Wu return false; 17147effdaa3SMark Wu case VGA_DEVICE: 17157effdaa3SMark Wu return true; 171653018216SPaolo Bonzini case VGA_STD: 1717b798c190SBenjamin Herrenschmidt case VGA_VIRTIO: 17186e66d0c6SThomas Huth case VGA_CIRRUS: 171953018216SPaolo Bonzini return pci_vga_init(pci_bus) != NULL; 172053018216SPaolo Bonzini default: 172114c6a894SDavid Gibson error_setg(errp, 172214c6a894SDavid Gibson "Unsupported VGA mode, only -vga std or -vga virtio is supported"); 172314c6a894SDavid Gibson return false; 172453018216SPaolo Bonzini } 172553018216SPaolo Bonzini } 172653018216SPaolo Bonzini 17274e5fe368SSuraj Jitindar Singh static int spapr_pre_load(void *opaque) 17284e5fe368SSuraj Jitindar Singh { 17294e5fe368SSuraj Jitindar Singh int rc; 17304e5fe368SSuraj Jitindar Singh 17314e5fe368SSuraj Jitindar Singh rc = spapr_caps_pre_load(opaque); 17324e5fe368SSuraj Jitindar Singh if (rc) { 17334e5fe368SSuraj Jitindar Singh return rc; 17344e5fe368SSuraj Jitindar Singh } 17354e5fe368SSuraj Jitindar Singh 17364e5fe368SSuraj Jitindar Singh return 0; 17374e5fe368SSuraj Jitindar Singh } 17384e5fe368SSuraj Jitindar Singh 1739880ae7deSDavid Gibson static int spapr_post_load(void *opaque, int version_id) 1740880ae7deSDavid Gibson { 1741ce2918cbSDavid Gibson SpaprMachineState *spapr = (SpaprMachineState *)opaque; 1742880ae7deSDavid Gibson int err = 0; 1743880ae7deSDavid Gibson 1744be85537dSDavid Gibson err = spapr_caps_post_migration(spapr); 1745be85537dSDavid Gibson if (err) { 1746be85537dSDavid Gibson return err; 1747be85537dSDavid Gibson } 1748be85537dSDavid Gibson 1749e502202cSCédric Le Goater /* 1750e502202cSCédric Le Goater * In earlier versions, there was no separate qdev for the PAPR 1751880ae7deSDavid Gibson * RTC, so the RTC offset was 
stored directly in sPAPREnvironment. 1752880ae7deSDavid Gibson * So when migrating from those versions, poke the incoming offset 1753e502202cSCédric Le Goater * value into the RTC device 1754e502202cSCédric Le Goater */ 1755880ae7deSDavid Gibson if (version_id < 3) { 1756147ff807SCédric Le Goater err = spapr_rtc_import_offset(&spapr->rtc, spapr->rtc_offset); 1757e502202cSCédric Le Goater if (err) { 1758e502202cSCédric Le Goater return err; 1759e502202cSCédric Le Goater } 1760880ae7deSDavid Gibson } 1761880ae7deSDavid Gibson 17620c86b2dfSLaurent Vivier if (kvm_enabled() && spapr->patb_entry) { 1763d39c90f5SBharata B Rao PowerPCCPU *cpu = POWERPC_CPU(first_cpu); 176479825f4dSBenjamin Herrenschmidt bool radix = !!(spapr->patb_entry & PATE1_GR); 1765d39c90f5SBharata B Rao bool gtse = !!(cpu->env.spr[SPR_LPCR] & LPCR_GTSE); 1766d39c90f5SBharata B Rao 176700fd075eSBenjamin Herrenschmidt /* 176800fd075eSBenjamin Herrenschmidt * Update LPCR:HR and UPRT as they may not be set properly in 176900fd075eSBenjamin Herrenschmidt * the stream 177000fd075eSBenjamin Herrenschmidt */ 177100fd075eSBenjamin Herrenschmidt spapr_set_all_lpcrs(radix ? 
(LPCR_HR | LPCR_UPRT) : 0, 177200fd075eSBenjamin Herrenschmidt LPCR_HR | LPCR_UPRT); 177300fd075eSBenjamin Herrenschmidt 1774d39c90f5SBharata B Rao err = kvmppc_configure_v3_mmu(cpu, radix, gtse, spapr->patb_entry); 1775d39c90f5SBharata B Rao if (err) { 1776d39c90f5SBharata B Rao error_report("Process table config unsupported by the host"); 1777d39c90f5SBharata B Rao return -EINVAL; 1778d39c90f5SBharata B Rao } 1779d39c90f5SBharata B Rao } 1780d39c90f5SBharata B Rao 17811c53b06cSCédric Le Goater err = spapr_irq_post_load(spapr, version_id); 17821c53b06cSCédric Le Goater if (err) { 17831c53b06cSCédric Le Goater return err; 17841c53b06cSCédric Le Goater } 17851c53b06cSCédric Le Goater 1786880ae7deSDavid Gibson return err; 1787880ae7deSDavid Gibson } 1788880ae7deSDavid Gibson 17894e5fe368SSuraj Jitindar Singh static int spapr_pre_save(void *opaque) 17904e5fe368SSuraj Jitindar Singh { 17914e5fe368SSuraj Jitindar Singh int rc; 17924e5fe368SSuraj Jitindar Singh 17934e5fe368SSuraj Jitindar Singh rc = spapr_caps_pre_save(opaque); 17944e5fe368SSuraj Jitindar Singh if (rc) { 17954e5fe368SSuraj Jitindar Singh return rc; 17964e5fe368SSuraj Jitindar Singh } 17974e5fe368SSuraj Jitindar Singh 17984e5fe368SSuraj Jitindar Singh return 0; 17994e5fe368SSuraj Jitindar Singh } 18004e5fe368SSuraj Jitindar Singh 1801880ae7deSDavid Gibson static bool version_before_3(void *opaque, int version_id) 1802880ae7deSDavid Gibson { 1803880ae7deSDavid Gibson return version_id < 3; 1804880ae7deSDavid Gibson } 1805880ae7deSDavid Gibson 1806fd38804bSDaniel Henrique Barboza static bool spapr_pending_events_needed(void *opaque) 1807fd38804bSDaniel Henrique Barboza { 1808ce2918cbSDavid Gibson SpaprMachineState *spapr = (SpaprMachineState *)opaque; 1809fd38804bSDaniel Henrique Barboza return !QTAILQ_EMPTY(&spapr->pending_events); 1810fd38804bSDaniel Henrique Barboza } 1811fd38804bSDaniel Henrique Barboza 1812fd38804bSDaniel Henrique Barboza static const VMStateDescription vmstate_spapr_event_entry = { 
1813fd38804bSDaniel Henrique Barboza .name = "spapr_event_log_entry", 1814fd38804bSDaniel Henrique Barboza .version_id = 1, 1815fd38804bSDaniel Henrique Barboza .minimum_version_id = 1, 1816fd38804bSDaniel Henrique Barboza .fields = (VMStateField[]) { 1817ce2918cbSDavid Gibson VMSTATE_UINT32(summary, SpaprEventLogEntry), 1818ce2918cbSDavid Gibson VMSTATE_UINT32(extended_length, SpaprEventLogEntry), 1819ce2918cbSDavid Gibson VMSTATE_VBUFFER_ALLOC_UINT32(extended_log, SpaprEventLogEntry, 0, 18205341258eSDavid Gibson NULL, extended_length), 1821fd38804bSDaniel Henrique Barboza VMSTATE_END_OF_LIST() 1822fd38804bSDaniel Henrique Barboza }, 1823fd38804bSDaniel Henrique Barboza }; 1824fd38804bSDaniel Henrique Barboza 1825fd38804bSDaniel Henrique Barboza static const VMStateDescription vmstate_spapr_pending_events = { 1826fd38804bSDaniel Henrique Barboza .name = "spapr_pending_events", 1827fd38804bSDaniel Henrique Barboza .version_id = 1, 1828fd38804bSDaniel Henrique Barboza .minimum_version_id = 1, 1829fd38804bSDaniel Henrique Barboza .needed = spapr_pending_events_needed, 1830fd38804bSDaniel Henrique Barboza .fields = (VMStateField[]) { 1831ce2918cbSDavid Gibson VMSTATE_QTAILQ_V(pending_events, SpaprMachineState, 1, 1832ce2918cbSDavid Gibson vmstate_spapr_event_entry, SpaprEventLogEntry, next), 1833fd38804bSDaniel Henrique Barboza VMSTATE_END_OF_LIST() 1834fd38804bSDaniel Henrique Barboza }, 1835fd38804bSDaniel Henrique Barboza }; 1836fd38804bSDaniel Henrique Barboza 183762ef3760SMichael Roth static bool spapr_ov5_cas_needed(void *opaque) 183862ef3760SMichael Roth { 1839ce2918cbSDavid Gibson SpaprMachineState *spapr = opaque; 1840ce2918cbSDavid Gibson SpaprOptionVector *ov5_mask = spapr_ovec_new(); 184162ef3760SMichael Roth bool cas_needed; 184262ef3760SMichael Roth 1843ce2918cbSDavid Gibson /* Prior to the introduction of SpaprOptionVector, we had two option 184462ef3760SMichael Roth * vectors we dealt with: OV5_FORM1_AFFINITY, and OV5_DRCONF_MEMORY. 
184562ef3760SMichael Roth * Both of these options encode machine topology into the device-tree 184662ef3760SMichael Roth * in such a way that the now-booted OS should still be able to interact 184762ef3760SMichael Roth * appropriately with QEMU regardless of what options were actually 184862ef3760SMichael Roth * negotiatied on the source side. 184962ef3760SMichael Roth * 185062ef3760SMichael Roth * As such, we can avoid migrating the CAS-negotiated options if these 185162ef3760SMichael Roth * are the only options available on the current machine/platform. 185262ef3760SMichael Roth * Since these are the only options available for pseries-2.7 and 185362ef3760SMichael Roth * earlier, this allows us to maintain old->new/new->old migration 185462ef3760SMichael Roth * compatibility. 185562ef3760SMichael Roth * 185662ef3760SMichael Roth * For QEMU 2.8+, there are additional CAS-negotiatable options available 185762ef3760SMichael Roth * via default pseries-2.8 machines and explicit command-line parameters. 185862ef3760SMichael Roth * Some of these options, like OV5_HP_EVT, *do* require QEMU to be aware 185962ef3760SMichael Roth * of the actual CAS-negotiated values to continue working properly. For 186062ef3760SMichael Roth * example, availability of memory unplug depends on knowing whether 186162ef3760SMichael Roth * OV5_HP_EVT was negotiated via CAS. 186262ef3760SMichael Roth * 186362ef3760SMichael Roth * Thus, for any cases where the set of available CAS-negotiatable 186462ef3760SMichael Roth * options extends beyond OV5_FORM1_AFFINITY and OV5_DRCONF_MEMORY, we 1865aef19c04SGreg Kurz * include the CAS-negotiated options in the migration stream, unless 1866aef19c04SGreg Kurz * if they affect boot time behaviour only. 
186762ef3760SMichael Roth */ 186862ef3760SMichael Roth spapr_ovec_set(ov5_mask, OV5_FORM1_AFFINITY); 186962ef3760SMichael Roth spapr_ovec_set(ov5_mask, OV5_DRCONF_MEMORY); 1870aef19c04SGreg Kurz spapr_ovec_set(ov5_mask, OV5_DRMEM_V2); 187162ef3760SMichael Roth 1872d1d32d62SDavid Gibson /* We need extra information if we have any bits outside the mask 1873d1d32d62SDavid Gibson * defined above */ 1874d1d32d62SDavid Gibson cas_needed = !spapr_ovec_subset(spapr->ov5, ov5_mask); 187562ef3760SMichael Roth 187662ef3760SMichael Roth spapr_ovec_cleanup(ov5_mask); 187762ef3760SMichael Roth 187862ef3760SMichael Roth return cas_needed; 187962ef3760SMichael Roth } 188062ef3760SMichael Roth 188162ef3760SMichael Roth static const VMStateDescription vmstate_spapr_ov5_cas = { 188262ef3760SMichael Roth .name = "spapr_option_vector_ov5_cas", 188362ef3760SMichael Roth .version_id = 1, 188462ef3760SMichael Roth .minimum_version_id = 1, 188562ef3760SMichael Roth .needed = spapr_ov5_cas_needed, 188662ef3760SMichael Roth .fields = (VMStateField[]) { 1887ce2918cbSDavid Gibson VMSTATE_STRUCT_POINTER_V(ov5_cas, SpaprMachineState, 1, 1888ce2918cbSDavid Gibson vmstate_spapr_ovec, SpaprOptionVector), 188962ef3760SMichael Roth VMSTATE_END_OF_LIST() 189062ef3760SMichael Roth }, 189162ef3760SMichael Roth }; 189262ef3760SMichael Roth 18939861bb3eSSuraj Jitindar Singh static bool spapr_patb_entry_needed(void *opaque) 18949861bb3eSSuraj Jitindar Singh { 1895ce2918cbSDavid Gibson SpaprMachineState *spapr = opaque; 18969861bb3eSSuraj Jitindar Singh 18979861bb3eSSuraj Jitindar Singh return !!spapr->patb_entry; 18989861bb3eSSuraj Jitindar Singh } 18999861bb3eSSuraj Jitindar Singh 19009861bb3eSSuraj Jitindar Singh static const VMStateDescription vmstate_spapr_patb_entry = { 19019861bb3eSSuraj Jitindar Singh .name = "spapr_patb_entry", 19029861bb3eSSuraj Jitindar Singh .version_id = 1, 19039861bb3eSSuraj Jitindar Singh .minimum_version_id = 1, 19049861bb3eSSuraj Jitindar Singh .needed = 
spapr_patb_entry_needed, 19059861bb3eSSuraj Jitindar Singh .fields = (VMStateField[]) { 1906ce2918cbSDavid Gibson VMSTATE_UINT64(patb_entry, SpaprMachineState), 19079861bb3eSSuraj Jitindar Singh VMSTATE_END_OF_LIST() 19089861bb3eSSuraj Jitindar Singh }, 19099861bb3eSSuraj Jitindar Singh }; 19109861bb3eSSuraj Jitindar Singh 191182cffa2eSCédric Le Goater static bool spapr_irq_map_needed(void *opaque) 191282cffa2eSCédric Le Goater { 1913ce2918cbSDavid Gibson SpaprMachineState *spapr = opaque; 191482cffa2eSCédric Le Goater 191582cffa2eSCédric Le Goater return spapr->irq_map && !bitmap_empty(spapr->irq_map, spapr->irq_map_nr); 191682cffa2eSCédric Le Goater } 191782cffa2eSCédric Le Goater 191882cffa2eSCédric Le Goater static const VMStateDescription vmstate_spapr_irq_map = { 191982cffa2eSCédric Le Goater .name = "spapr_irq_map", 192082cffa2eSCédric Le Goater .version_id = 1, 192182cffa2eSCédric Le Goater .minimum_version_id = 1, 192282cffa2eSCédric Le Goater .needed = spapr_irq_map_needed, 192382cffa2eSCédric Le Goater .fields = (VMStateField[]) { 1924ce2918cbSDavid Gibson VMSTATE_BITMAP(irq_map, SpaprMachineState, 0, irq_map_nr), 192582cffa2eSCédric Le Goater VMSTATE_END_OF_LIST() 192682cffa2eSCédric Le Goater }, 192782cffa2eSCédric Le Goater }; 192882cffa2eSCédric Le Goater 1929fea35ca4SAlexey Kardashevskiy static bool spapr_dtb_needed(void *opaque) 1930fea35ca4SAlexey Kardashevskiy { 1931ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(opaque); 1932fea35ca4SAlexey Kardashevskiy 1933fea35ca4SAlexey Kardashevskiy return smc->update_dt_enabled; 1934fea35ca4SAlexey Kardashevskiy } 1935fea35ca4SAlexey Kardashevskiy 1936fea35ca4SAlexey Kardashevskiy static int spapr_dtb_pre_load(void *opaque) 1937fea35ca4SAlexey Kardashevskiy { 1938ce2918cbSDavid Gibson SpaprMachineState *spapr = (SpaprMachineState *)opaque; 1939fea35ca4SAlexey Kardashevskiy 1940fea35ca4SAlexey Kardashevskiy g_free(spapr->fdt_blob); 1941fea35ca4SAlexey Kardashevskiy spapr->fdt_blob = 
NULL; 1942fea35ca4SAlexey Kardashevskiy spapr->fdt_size = 0; 1943fea35ca4SAlexey Kardashevskiy 1944fea35ca4SAlexey Kardashevskiy return 0; 1945fea35ca4SAlexey Kardashevskiy } 1946fea35ca4SAlexey Kardashevskiy 1947fea35ca4SAlexey Kardashevskiy static const VMStateDescription vmstate_spapr_dtb = { 1948fea35ca4SAlexey Kardashevskiy .name = "spapr_dtb", 1949fea35ca4SAlexey Kardashevskiy .version_id = 1, 1950fea35ca4SAlexey Kardashevskiy .minimum_version_id = 1, 1951fea35ca4SAlexey Kardashevskiy .needed = spapr_dtb_needed, 1952fea35ca4SAlexey Kardashevskiy .pre_load = spapr_dtb_pre_load, 1953fea35ca4SAlexey Kardashevskiy .fields = (VMStateField[]) { 1954ce2918cbSDavid Gibson VMSTATE_UINT32(fdt_initial_size, SpaprMachineState), 1955ce2918cbSDavid Gibson VMSTATE_UINT32(fdt_size, SpaprMachineState), 1956ce2918cbSDavid Gibson VMSTATE_VBUFFER_ALLOC_UINT32(fdt_blob, SpaprMachineState, 0, NULL, 1957fea35ca4SAlexey Kardashevskiy fdt_size), 1958fea35ca4SAlexey Kardashevskiy VMSTATE_END_OF_LIST() 1959fea35ca4SAlexey Kardashevskiy }, 1960fea35ca4SAlexey Kardashevskiy }; 1961fea35ca4SAlexey Kardashevskiy 19624be21d56SDavid Gibson static const VMStateDescription vmstate_spapr = { 19634be21d56SDavid Gibson .name = "spapr", 1964880ae7deSDavid Gibson .version_id = 3, 19654be21d56SDavid Gibson .minimum_version_id = 1, 19664e5fe368SSuraj Jitindar Singh .pre_load = spapr_pre_load, 1967880ae7deSDavid Gibson .post_load = spapr_post_load, 19684e5fe368SSuraj Jitindar Singh .pre_save = spapr_pre_save, 19694be21d56SDavid Gibson .fields = (VMStateField[]) { 1970880ae7deSDavid Gibson /* used to be @next_irq */ 1971880ae7deSDavid Gibson VMSTATE_UNUSED_BUFFER(version_before_3, 0, 4), 19724be21d56SDavid Gibson 19734be21d56SDavid Gibson /* RTC offset */ 1974ce2918cbSDavid Gibson VMSTATE_UINT64_TEST(rtc_offset, SpaprMachineState, version_before_3), 1975880ae7deSDavid Gibson 1976ce2918cbSDavid Gibson VMSTATE_PPC_TIMEBASE_V(tb, SpaprMachineState, 2), 19774be21d56SDavid Gibson VMSTATE_END_OF_LIST() 
19784be21d56SDavid Gibson }, 197962ef3760SMichael Roth .subsections = (const VMStateDescription*[]) { 198062ef3760SMichael Roth &vmstate_spapr_ov5_cas, 19819861bb3eSSuraj Jitindar Singh &vmstate_spapr_patb_entry, 1982fd38804bSDaniel Henrique Barboza &vmstate_spapr_pending_events, 19834e5fe368SSuraj Jitindar Singh &vmstate_spapr_cap_htm, 19844e5fe368SSuraj Jitindar Singh &vmstate_spapr_cap_vsx, 19854e5fe368SSuraj Jitindar Singh &vmstate_spapr_cap_dfp, 19868f38eaf8SSuraj Jitindar Singh &vmstate_spapr_cap_cfpc, 198709114fd8SSuraj Jitindar Singh &vmstate_spapr_cap_sbbc, 19884be8d4e7SSuraj Jitindar Singh &vmstate_spapr_cap_ibs, 198964d4a534SDavid Gibson &vmstate_spapr_cap_hpt_maxpagesize, 199082cffa2eSCédric Le Goater &vmstate_spapr_irq_map, 1991b9a477b7SSuraj Jitindar Singh &vmstate_spapr_cap_nested_kvm_hv, 1992fea35ca4SAlexey Kardashevskiy &vmstate_spapr_dtb, 1993c982f5cfSSuraj Jitindar Singh &vmstate_spapr_cap_large_decr, 19948ff43ee4SSuraj Jitindar Singh &vmstate_spapr_cap_ccf_assist, 199562ef3760SMichael Roth NULL 199662ef3760SMichael Roth } 19974be21d56SDavid Gibson }; 19984be21d56SDavid Gibson 19994be21d56SDavid Gibson static int htab_save_setup(QEMUFile *f, void *opaque) 20004be21d56SDavid Gibson { 2001ce2918cbSDavid Gibson SpaprMachineState *spapr = opaque; 20024be21d56SDavid Gibson 20034be21d56SDavid Gibson /* "Iteration" header */ 20043a384297SBharata B Rao if (!spapr->htab_shift) { 20053a384297SBharata B Rao qemu_put_be32(f, -1); 20063a384297SBharata B Rao } else { 20074be21d56SDavid Gibson qemu_put_be32(f, spapr->htab_shift); 20083a384297SBharata B Rao } 20094be21d56SDavid Gibson 2010e68cb8b4SAlexey Kardashevskiy if (spapr->htab) { 2011e68cb8b4SAlexey Kardashevskiy spapr->htab_save_index = 0; 2012e68cb8b4SAlexey Kardashevskiy spapr->htab_first_pass = true; 2013e68cb8b4SAlexey Kardashevskiy } else { 20143a384297SBharata B Rao if (spapr->htab_shift) { 2015e68cb8b4SAlexey Kardashevskiy assert(kvm_enabled()); 20164be21d56SDavid Gibson } 20173a384297SBharata B 
Rao } 20184be21d56SDavid Gibson 2019e68cb8b4SAlexey Kardashevskiy 2020e68cb8b4SAlexey Kardashevskiy return 0; 2021e68cb8b4SAlexey Kardashevskiy } 20224be21d56SDavid Gibson 2023ce2918cbSDavid Gibson static void htab_save_chunk(QEMUFile *f, SpaprMachineState *spapr, 2024332f7721SGreg Kurz int chunkstart, int n_valid, int n_invalid) 2025332f7721SGreg Kurz { 2026332f7721SGreg Kurz qemu_put_be32(f, chunkstart); 2027332f7721SGreg Kurz qemu_put_be16(f, n_valid); 2028332f7721SGreg Kurz qemu_put_be16(f, n_invalid); 2029332f7721SGreg Kurz qemu_put_buffer(f, HPTE(spapr->htab, chunkstart), 2030332f7721SGreg Kurz HASH_PTE_SIZE_64 * n_valid); 2031332f7721SGreg Kurz } 2032332f7721SGreg Kurz 2033332f7721SGreg Kurz static void htab_save_end_marker(QEMUFile *f) 2034332f7721SGreg Kurz { 2035332f7721SGreg Kurz qemu_put_be32(f, 0); 2036332f7721SGreg Kurz qemu_put_be16(f, 0); 2037332f7721SGreg Kurz qemu_put_be16(f, 0); 2038332f7721SGreg Kurz } 2039332f7721SGreg Kurz 2040ce2918cbSDavid Gibson static void htab_save_first_pass(QEMUFile *f, SpaprMachineState *spapr, 20414be21d56SDavid Gibson int64_t max_ns) 20424be21d56SDavid Gibson { 2043378bc217SDavid Gibson bool has_timeout = max_ns != -1; 20444be21d56SDavid Gibson int htabslots = HTAB_SIZE(spapr) / HASH_PTE_SIZE_64; 20454be21d56SDavid Gibson int index = spapr->htab_save_index; 2046bc72ad67SAlex Bligh int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); 20474be21d56SDavid Gibson 20484be21d56SDavid Gibson assert(spapr->htab_first_pass); 20494be21d56SDavid Gibson 20504be21d56SDavid Gibson do { 20514be21d56SDavid Gibson int chunkstart; 20524be21d56SDavid Gibson 20534be21d56SDavid Gibson /* Consume invalid HPTEs */ 20544be21d56SDavid Gibson while ((index < htabslots) 20554be21d56SDavid Gibson && !HPTE_VALID(HPTE(spapr->htab, index))) { 20564be21d56SDavid Gibson CLEAN_HPTE(HPTE(spapr->htab, index)); 205724ec2863SMarc-André Lureau index++; 20584be21d56SDavid Gibson } 20594be21d56SDavid Gibson 20604be21d56SDavid Gibson /* Consume valid 
HPTEs */ 20614be21d56SDavid Gibson chunkstart = index; 2062338c25b6SSamuel Mendoza-Jonas while ((index < htabslots) && (index - chunkstart < USHRT_MAX) 20634be21d56SDavid Gibson && HPTE_VALID(HPTE(spapr->htab, index))) { 20644be21d56SDavid Gibson CLEAN_HPTE(HPTE(spapr->htab, index)); 206524ec2863SMarc-André Lureau index++; 20664be21d56SDavid Gibson } 20674be21d56SDavid Gibson 20684be21d56SDavid Gibson if (index > chunkstart) { 20694be21d56SDavid Gibson int n_valid = index - chunkstart; 20704be21d56SDavid Gibson 2071332f7721SGreg Kurz htab_save_chunk(f, spapr, chunkstart, n_valid, 0); 20724be21d56SDavid Gibson 2073378bc217SDavid Gibson if (has_timeout && 2074378bc217SDavid Gibson (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) > max_ns) { 20754be21d56SDavid Gibson break; 20764be21d56SDavid Gibson } 20774be21d56SDavid Gibson } 20784be21d56SDavid Gibson } while ((index < htabslots) && !qemu_file_rate_limit(f)); 20794be21d56SDavid Gibson 20804be21d56SDavid Gibson if (index >= htabslots) { 20814be21d56SDavid Gibson assert(index == htabslots); 20824be21d56SDavid Gibson index = 0; 20834be21d56SDavid Gibson spapr->htab_first_pass = false; 20844be21d56SDavid Gibson } 20854be21d56SDavid Gibson spapr->htab_save_index = index; 20864be21d56SDavid Gibson } 20874be21d56SDavid Gibson 2088ce2918cbSDavid Gibson static int htab_save_later_pass(QEMUFile *f, SpaprMachineState *spapr, 20894be21d56SDavid Gibson int64_t max_ns) 20904be21d56SDavid Gibson { 20914be21d56SDavid Gibson bool final = max_ns < 0; 20924be21d56SDavid Gibson int htabslots = HTAB_SIZE(spapr) / HASH_PTE_SIZE_64; 20934be21d56SDavid Gibson int examined = 0, sent = 0; 20944be21d56SDavid Gibson int index = spapr->htab_save_index; 2095bc72ad67SAlex Bligh int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); 20964be21d56SDavid Gibson 20974be21d56SDavid Gibson assert(!spapr->htab_first_pass); 20984be21d56SDavid Gibson 20994be21d56SDavid Gibson do { 21004be21d56SDavid Gibson int chunkstart, invalidstart; 
21014be21d56SDavid Gibson 21024be21d56SDavid Gibson /* Consume non-dirty HPTEs */ 21034be21d56SDavid Gibson while ((index < htabslots) 21044be21d56SDavid Gibson && !HPTE_DIRTY(HPTE(spapr->htab, index))) { 21054be21d56SDavid Gibson index++; 21064be21d56SDavid Gibson examined++; 21074be21d56SDavid Gibson } 21084be21d56SDavid Gibson 21094be21d56SDavid Gibson chunkstart = index; 21104be21d56SDavid Gibson /* Consume valid dirty HPTEs */ 2111338c25b6SSamuel Mendoza-Jonas while ((index < htabslots) && (index - chunkstart < USHRT_MAX) 21124be21d56SDavid Gibson && HPTE_DIRTY(HPTE(spapr->htab, index)) 21134be21d56SDavid Gibson && HPTE_VALID(HPTE(spapr->htab, index))) { 21144be21d56SDavid Gibson CLEAN_HPTE(HPTE(spapr->htab, index)); 21154be21d56SDavid Gibson index++; 21164be21d56SDavid Gibson examined++; 21174be21d56SDavid Gibson } 21184be21d56SDavid Gibson 21194be21d56SDavid Gibson invalidstart = index; 21204be21d56SDavid Gibson /* Consume invalid dirty HPTEs */ 2121338c25b6SSamuel Mendoza-Jonas while ((index < htabslots) && (index - invalidstart < USHRT_MAX) 21224be21d56SDavid Gibson && HPTE_DIRTY(HPTE(spapr->htab, index)) 21234be21d56SDavid Gibson && !HPTE_VALID(HPTE(spapr->htab, index))) { 21244be21d56SDavid Gibson CLEAN_HPTE(HPTE(spapr->htab, index)); 21254be21d56SDavid Gibson index++; 21264be21d56SDavid Gibson examined++; 21274be21d56SDavid Gibson } 21284be21d56SDavid Gibson 21294be21d56SDavid Gibson if (index > chunkstart) { 21304be21d56SDavid Gibson int n_valid = invalidstart - chunkstart; 21314be21d56SDavid Gibson int n_invalid = index - invalidstart; 21324be21d56SDavid Gibson 2133332f7721SGreg Kurz htab_save_chunk(f, spapr, chunkstart, n_valid, n_invalid); 21344be21d56SDavid Gibson sent += index - chunkstart; 21354be21d56SDavid Gibson 2136bc72ad67SAlex Bligh if (!final && (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) > max_ns) { 21374be21d56SDavid Gibson break; 21384be21d56SDavid Gibson } 21394be21d56SDavid Gibson } 21404be21d56SDavid Gibson 
21414be21d56SDavid Gibson if (examined >= htabslots) { 21424be21d56SDavid Gibson break; 21434be21d56SDavid Gibson } 21444be21d56SDavid Gibson 21454be21d56SDavid Gibson if (index >= htabslots) { 21464be21d56SDavid Gibson assert(index == htabslots); 21474be21d56SDavid Gibson index = 0; 21484be21d56SDavid Gibson } 21494be21d56SDavid Gibson } while ((examined < htabslots) && (!qemu_file_rate_limit(f) || final)); 21504be21d56SDavid Gibson 21514be21d56SDavid Gibson if (index >= htabslots) { 21524be21d56SDavid Gibson assert(index == htabslots); 21534be21d56SDavid Gibson index = 0; 21544be21d56SDavid Gibson } 21554be21d56SDavid Gibson 21564be21d56SDavid Gibson spapr->htab_save_index = index; 21574be21d56SDavid Gibson 2158e68cb8b4SAlexey Kardashevskiy return (examined >= htabslots) && (sent == 0) ? 1 : 0; 21594be21d56SDavid Gibson } 21604be21d56SDavid Gibson 2161e68cb8b4SAlexey Kardashevskiy #define MAX_ITERATION_NS 5000000 /* 5 ms */ 2162e68cb8b4SAlexey Kardashevskiy #define MAX_KVM_BUF_SIZE 2048 2163e68cb8b4SAlexey Kardashevskiy 21644be21d56SDavid Gibson static int htab_save_iterate(QEMUFile *f, void *opaque) 21654be21d56SDavid Gibson { 2166ce2918cbSDavid Gibson SpaprMachineState *spapr = opaque; 2167715c5407SDavid Gibson int fd; 2168e68cb8b4SAlexey Kardashevskiy int rc = 0; 21694be21d56SDavid Gibson 21704be21d56SDavid Gibson /* Iteration header */ 21713a384297SBharata B Rao if (!spapr->htab_shift) { 21723a384297SBharata B Rao qemu_put_be32(f, -1); 2173e8cd4247SLaurent Vivier return 1; 21743a384297SBharata B Rao } else { 21754be21d56SDavid Gibson qemu_put_be32(f, 0); 21763a384297SBharata B Rao } 21774be21d56SDavid Gibson 2178e68cb8b4SAlexey Kardashevskiy if (!spapr->htab) { 2179e68cb8b4SAlexey Kardashevskiy assert(kvm_enabled()); 2180e68cb8b4SAlexey Kardashevskiy 2181715c5407SDavid Gibson fd = get_htab_fd(spapr); 2182715c5407SDavid Gibson if (fd < 0) { 2183715c5407SDavid Gibson return fd; 218401a57972SSamuel Mendoza-Jonas } 218501a57972SSamuel Mendoza-Jonas 
2186715c5407SDavid Gibson rc = kvmppc_save_htab(f, fd, MAX_KVM_BUF_SIZE, MAX_ITERATION_NS); 2187e68cb8b4SAlexey Kardashevskiy if (rc < 0) { 2188e68cb8b4SAlexey Kardashevskiy return rc; 2189e68cb8b4SAlexey Kardashevskiy } 2190e68cb8b4SAlexey Kardashevskiy } else if (spapr->htab_first_pass) { 21914be21d56SDavid Gibson htab_save_first_pass(f, spapr, MAX_ITERATION_NS); 21924be21d56SDavid Gibson } else { 2193e68cb8b4SAlexey Kardashevskiy rc = htab_save_later_pass(f, spapr, MAX_ITERATION_NS); 21944be21d56SDavid Gibson } 21954be21d56SDavid Gibson 2196332f7721SGreg Kurz htab_save_end_marker(f); 21974be21d56SDavid Gibson 2198e68cb8b4SAlexey Kardashevskiy return rc; 21994be21d56SDavid Gibson } 22004be21d56SDavid Gibson 22014be21d56SDavid Gibson static int htab_save_complete(QEMUFile *f, void *opaque) 22024be21d56SDavid Gibson { 2203ce2918cbSDavid Gibson SpaprMachineState *spapr = opaque; 2204715c5407SDavid Gibson int fd; 22054be21d56SDavid Gibson 22064be21d56SDavid Gibson /* Iteration header */ 22073a384297SBharata B Rao if (!spapr->htab_shift) { 22083a384297SBharata B Rao qemu_put_be32(f, -1); 22093a384297SBharata B Rao return 0; 22103a384297SBharata B Rao } else { 22114be21d56SDavid Gibson qemu_put_be32(f, 0); 22123a384297SBharata B Rao } 22134be21d56SDavid Gibson 2214e68cb8b4SAlexey Kardashevskiy if (!spapr->htab) { 2215e68cb8b4SAlexey Kardashevskiy int rc; 2216e68cb8b4SAlexey Kardashevskiy 2217e68cb8b4SAlexey Kardashevskiy assert(kvm_enabled()); 2218e68cb8b4SAlexey Kardashevskiy 2219715c5407SDavid Gibson fd = get_htab_fd(spapr); 2220715c5407SDavid Gibson if (fd < 0) { 2221715c5407SDavid Gibson return fd; 222201a57972SSamuel Mendoza-Jonas } 222301a57972SSamuel Mendoza-Jonas 2224715c5407SDavid Gibson rc = kvmppc_save_htab(f, fd, MAX_KVM_BUF_SIZE, -1); 2225e68cb8b4SAlexey Kardashevskiy if (rc < 0) { 2226e68cb8b4SAlexey Kardashevskiy return rc; 2227e68cb8b4SAlexey Kardashevskiy } 2228e68cb8b4SAlexey Kardashevskiy } else { 2229378bc217SDavid Gibson if (spapr->htab_first_pass) 
{ 2230378bc217SDavid Gibson htab_save_first_pass(f, spapr, -1); 2231378bc217SDavid Gibson } 22324be21d56SDavid Gibson htab_save_later_pass(f, spapr, -1); 2233e68cb8b4SAlexey Kardashevskiy } 22344be21d56SDavid Gibson 22354be21d56SDavid Gibson /* End marker */ 2236332f7721SGreg Kurz htab_save_end_marker(f); 22374be21d56SDavid Gibson 22384be21d56SDavid Gibson return 0; 22394be21d56SDavid Gibson } 22404be21d56SDavid Gibson 22414be21d56SDavid Gibson static int htab_load(QEMUFile *f, void *opaque, int version_id) 22424be21d56SDavid Gibson { 2243ce2918cbSDavid Gibson SpaprMachineState *spapr = opaque; 22444be21d56SDavid Gibson uint32_t section_hdr; 2245e68cb8b4SAlexey Kardashevskiy int fd = -1; 224614b0d748SGreg Kurz Error *local_err = NULL; 22474be21d56SDavid Gibson 22484be21d56SDavid Gibson if (version_id < 1 || version_id > 1) { 224998a5d100SDavid Gibson error_report("htab_load() bad version"); 22504be21d56SDavid Gibson return -EINVAL; 22514be21d56SDavid Gibson } 22524be21d56SDavid Gibson 22534be21d56SDavid Gibson section_hdr = qemu_get_be32(f); 22544be21d56SDavid Gibson 22553a384297SBharata B Rao if (section_hdr == -1) { 22563a384297SBharata B Rao spapr_free_hpt(spapr); 22573a384297SBharata B Rao return 0; 22583a384297SBharata B Rao } 22593a384297SBharata B Rao 22604be21d56SDavid Gibson if (section_hdr) { 2261c5f54f3eSDavid Gibson /* First section gives the htab size */ 2262c5f54f3eSDavid Gibson spapr_reallocate_hpt(spapr, section_hdr, &local_err); 2263c5f54f3eSDavid Gibson if (local_err) { 2264c5f54f3eSDavid Gibson error_report_err(local_err); 22654be21d56SDavid Gibson return -EINVAL; 22664be21d56SDavid Gibson } 22674be21d56SDavid Gibson return 0; 22684be21d56SDavid Gibson } 22694be21d56SDavid Gibson 2270e68cb8b4SAlexey Kardashevskiy if (!spapr->htab) { 2271e68cb8b4SAlexey Kardashevskiy assert(kvm_enabled()); 2272e68cb8b4SAlexey Kardashevskiy 227314b0d748SGreg Kurz fd = kvmppc_get_htab_fd(true, 0, &local_err); 2274e68cb8b4SAlexey Kardashevskiy if (fd < 0) { 
227514b0d748SGreg Kurz error_report_err(local_err); 227682be8e73SGreg Kurz return fd; 2277e68cb8b4SAlexey Kardashevskiy } 2278e68cb8b4SAlexey Kardashevskiy } 2279e68cb8b4SAlexey Kardashevskiy 22804be21d56SDavid Gibson while (true) { 22814be21d56SDavid Gibson uint32_t index; 22824be21d56SDavid Gibson uint16_t n_valid, n_invalid; 22834be21d56SDavid Gibson 22844be21d56SDavid Gibson index = qemu_get_be32(f); 22854be21d56SDavid Gibson n_valid = qemu_get_be16(f); 22864be21d56SDavid Gibson n_invalid = qemu_get_be16(f); 22874be21d56SDavid Gibson 22884be21d56SDavid Gibson if ((index == 0) && (n_valid == 0) && (n_invalid == 0)) { 22894be21d56SDavid Gibson /* End of Stream */ 22904be21d56SDavid Gibson break; 22914be21d56SDavid Gibson } 22924be21d56SDavid Gibson 2293e68cb8b4SAlexey Kardashevskiy if ((index + n_valid + n_invalid) > 22944be21d56SDavid Gibson (HTAB_SIZE(spapr) / HASH_PTE_SIZE_64)) { 22954be21d56SDavid Gibson /* Bad index in stream */ 229698a5d100SDavid Gibson error_report( 229798a5d100SDavid Gibson "htab_load() bad index %d (%hd+%hd entries) in htab stream (htab_shift=%d)", 229898a5d100SDavid Gibson index, n_valid, n_invalid, spapr->htab_shift); 22994be21d56SDavid Gibson return -EINVAL; 23004be21d56SDavid Gibson } 23014be21d56SDavid Gibson 2302e68cb8b4SAlexey Kardashevskiy if (spapr->htab) { 23034be21d56SDavid Gibson if (n_valid) { 23044be21d56SDavid Gibson qemu_get_buffer(f, HPTE(spapr->htab, index), 23054be21d56SDavid Gibson HASH_PTE_SIZE_64 * n_valid); 23064be21d56SDavid Gibson } 23074be21d56SDavid Gibson if (n_invalid) { 23084be21d56SDavid Gibson memset(HPTE(spapr->htab, index + n_valid), 0, 23094be21d56SDavid Gibson HASH_PTE_SIZE_64 * n_invalid); 23104be21d56SDavid Gibson } 2311e68cb8b4SAlexey Kardashevskiy } else { 2312e68cb8b4SAlexey Kardashevskiy int rc; 2313e68cb8b4SAlexey Kardashevskiy 2314e68cb8b4SAlexey Kardashevskiy assert(fd >= 0); 2315e68cb8b4SAlexey Kardashevskiy 2316e68cb8b4SAlexey Kardashevskiy rc = kvmppc_load_htab_chunk(f, fd, index, n_valid, 
n_invalid); 2317e68cb8b4SAlexey Kardashevskiy if (rc < 0) { 2318e68cb8b4SAlexey Kardashevskiy return rc; 2319e68cb8b4SAlexey Kardashevskiy } 2320e68cb8b4SAlexey Kardashevskiy } 2321e68cb8b4SAlexey Kardashevskiy } 2322e68cb8b4SAlexey Kardashevskiy 2323e68cb8b4SAlexey Kardashevskiy if (!spapr->htab) { 2324e68cb8b4SAlexey Kardashevskiy assert(fd >= 0); 2325e68cb8b4SAlexey Kardashevskiy close(fd); 23264be21d56SDavid Gibson } 23274be21d56SDavid Gibson 23284be21d56SDavid Gibson return 0; 23294be21d56SDavid Gibson } 23304be21d56SDavid Gibson 233170f794fcSJuan Quintela static void htab_save_cleanup(void *opaque) 2332c573fc03SThomas Huth { 2333ce2918cbSDavid Gibson SpaprMachineState *spapr = opaque; 2334c573fc03SThomas Huth 2335c573fc03SThomas Huth close_htab_fd(spapr); 2336c573fc03SThomas Huth } 2337c573fc03SThomas Huth 23384be21d56SDavid Gibson static SaveVMHandlers savevm_htab_handlers = { 23399907e842SJuan Quintela .save_setup = htab_save_setup, 23404be21d56SDavid Gibson .save_live_iterate = htab_save_iterate, 2341a3e06c3dSDr. 
David Alan Gilbert .save_live_complete_precopy = htab_save_complete, 234270f794fcSJuan Quintela .save_cleanup = htab_save_cleanup, 23434be21d56SDavid Gibson .load_state = htab_load, 23444be21d56SDavid Gibson }; 23454be21d56SDavid Gibson 23465b2128d2SAlexander Graf static void spapr_boot_set(void *opaque, const char *boot_device, 23475b2128d2SAlexander Graf Error **errp) 23485b2128d2SAlexander Graf { 2349c86c1affSDaniel Henrique Barboza MachineState *machine = MACHINE(opaque); 23505b2128d2SAlexander Graf machine->boot_order = g_strdup(boot_device); 23515b2128d2SAlexander Graf } 23525b2128d2SAlexander Graf 2353ce2918cbSDavid Gibson static void spapr_create_lmb_dr_connectors(SpaprMachineState *spapr) 2354224245bfSDavid Gibson { 2355224245bfSDavid Gibson MachineState *machine = MACHINE(spapr); 2356224245bfSDavid Gibson uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE; 2357e8f986fcSBharata B Rao uint32_t nr_lmbs = (machine->maxram_size - machine->ram_size)/lmb_size; 2358224245bfSDavid Gibson int i; 2359224245bfSDavid Gibson 2360224245bfSDavid Gibson for (i = 0; i < nr_lmbs; i++) { 2361224245bfSDavid Gibson uint64_t addr; 2362224245bfSDavid Gibson 2363b0c14ec4SDavid Hildenbrand addr = i * lmb_size + machine->device_memory->base; 23646caf3ac6SDavid Gibson spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_LMB, 2365224245bfSDavid Gibson addr / lmb_size); 2366224245bfSDavid Gibson } 2367224245bfSDavid Gibson } 2368224245bfSDavid Gibson 2369224245bfSDavid Gibson /* 2370224245bfSDavid Gibson * If RAM size, maxmem size and individual node mem sizes aren't aligned 2371224245bfSDavid Gibson * to SPAPR_MEMORY_BLOCK_SIZE(256MB), then refuse to start the guest 2372224245bfSDavid Gibson * since we can't support such unaligned sizes with DRCONF_MEMORY. 
2373224245bfSDavid Gibson */ 23747c150d6fSDavid Gibson static void spapr_validate_node_memory(MachineState *machine, Error **errp) 2375224245bfSDavid Gibson { 2376224245bfSDavid Gibson int i; 2377224245bfSDavid Gibson 23787c150d6fSDavid Gibson if (machine->ram_size % SPAPR_MEMORY_BLOCK_SIZE) { 23797c150d6fSDavid Gibson error_setg(errp, "Memory size 0x" RAM_ADDR_FMT 2380ab3dd749SPhilippe Mathieu-Daudé " is not aligned to %" PRIu64 " MiB", 23817c150d6fSDavid Gibson machine->ram_size, 2382d23b6caaSPhilippe Mathieu-Daudé SPAPR_MEMORY_BLOCK_SIZE / MiB); 23837c150d6fSDavid Gibson return; 23847c150d6fSDavid Gibson } 23857c150d6fSDavid Gibson 23867c150d6fSDavid Gibson if (machine->maxram_size % SPAPR_MEMORY_BLOCK_SIZE) { 23877c150d6fSDavid Gibson error_setg(errp, "Maximum memory size 0x" RAM_ADDR_FMT 2388ab3dd749SPhilippe Mathieu-Daudé " is not aligned to %" PRIu64 " MiB", 23897c150d6fSDavid Gibson machine->ram_size, 2390d23b6caaSPhilippe Mathieu-Daudé SPAPR_MEMORY_BLOCK_SIZE / MiB); 23917c150d6fSDavid Gibson return; 2392224245bfSDavid Gibson } 2393224245bfSDavid Gibson 2394aa570207STao Xu for (i = 0; i < machine->numa_state->num_nodes; i++) { 23957e721e7bSTao Xu if (machine->numa_state->nodes[i].node_mem % SPAPR_MEMORY_BLOCK_SIZE) { 23967c150d6fSDavid Gibson error_setg(errp, 23977c150d6fSDavid Gibson "Node %d memory size 0x%" PRIx64 2398ab3dd749SPhilippe Mathieu-Daudé " is not aligned to %" PRIu64 " MiB", 23997e721e7bSTao Xu i, machine->numa_state->nodes[i].node_mem, 2400d23b6caaSPhilippe Mathieu-Daudé SPAPR_MEMORY_BLOCK_SIZE / MiB); 24017c150d6fSDavid Gibson return; 2402224245bfSDavid Gibson } 2403224245bfSDavid Gibson } 2404224245bfSDavid Gibson } 2405224245bfSDavid Gibson 2406535455fdSIgor Mammedov /* find cpu slot in machine->possible_cpus by core_id */ 2407535455fdSIgor Mammedov static CPUArchId *spapr_find_cpu_slot(MachineState *ms, uint32_t id, int *idx) 2408535455fdSIgor Mammedov { 2409fe6b6346SLike Xu int index = id / ms->smp.threads; 2410535455fdSIgor Mammedov 
    if (index >= ms->possible_cpus->len) {
        return NULL;
    }
    if (idx) {
        *idx = index;
    }
    return &ms->possible_cpus->cpus[index];
}

/*
 * Validate the SMT configuration and choose the VSMT ("virtual SMT")
 * mode for the machine.  On success spapr->vsmt holds the mode that
 * will be used; any problem (threads/core not a power of 2, more than
 * one thread under TCG, KVM refusing the requested mode, ...) is
 * reported through @errp.
 */
static void spapr_set_vsmt_mode(SpaprMachineState *spapr, Error **errp)
{
    MachineState *ms = MACHINE(spapr);
    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
    Error *local_err = NULL;
    bool vsmt_user = !!spapr->vsmt;   /* non-zero means set on command line */
    int kvm_smt = kvmppc_smt_threads();
    int ret;
    unsigned int smp_threads = ms->smp.threads;

    if (!kvm_enabled() && (smp_threads > 1)) {
        error_setg(&local_err, "TCG cannot support more than 1 thread/core "
                   "on a pseries machine");
        goto out;
    }
    if (!is_power_of_2(smp_threads)) {
        error_setg(&local_err, "Cannot support %d threads/core on a pseries "
                   "machine because it must be a power of 2", smp_threads);
        goto out;
    }

    /* Determine the VSMT mode to use: */
    if (vsmt_user) {
        if (spapr->vsmt < smp_threads) {
            error_setg(&local_err, "Cannot support VSMT mode %d"
                       " because it must be >= threads/core (%d)",
                       spapr->vsmt, smp_threads);
            goto out;
        }
        /* In this case, spapr->vsmt has been set by the command line */
    } else if (!smc->smp_threads_vsmt) {
        /*
         * Default VSMT value is tricky, because we need it to be as
         * consistent as possible (for migration), but this requires
         * changing it for at least some existing cases.  We pick 8 as
         * the value that we'd get with KVM on POWER8, the
         * overwhelmingly common case in production systems.
         */
        spapr->vsmt = MAX(8, smp_threads);
    } else {
        spapr->vsmt = smp_threads;
    }

    /* KVM: If necessary, set the SMT mode: */
    if (kvm_enabled() && (spapr->vsmt != kvm_smt)) {
        ret = kvmppc_set_smt_threads(spapr->vsmt);
        if (ret) {
            /* Looks like KVM isn't able to change VSMT mode */
            error_setg(&local_err,
                       "Failed to set KVM's VSMT mode to %d (errno %d)",
                       spapr->vsmt, ret);
            /* We can live with that if the default one is big enough
             * for the number of threads, and a submultiple of the one
             * we want.  In this case we'll waste some vcpu ids, but
             * behaviour will be correct */
            if ((kvm_smt >= smp_threads) && ((spapr->vsmt % kvm_smt) == 0)) {
                warn_report_err(local_err);
                local_err = NULL;
                goto out;
            } else {
                if (!vsmt_user) {
                    error_append_hint(&local_err,
                                      "On PPC, a VM with %d threads/core"
                                      " on a host with %d threads/core"
                                      " requires the use of VSMT mode %d.\n",
                                      smp_threads, kvm_smt, spapr->vsmt);
                }
                kvmppc_error_append_smt_possible_hint(&local_err);
                goto out;
            }
        }
    }
    /* else TCG: nothing to do currently */
out:
    error_propagate(errp, local_err);
}

/*
 * Create the boot-time CPU cores and, when this machine version
 * supports CPU hotplug, the DR connectors for every possible core.
 * Exits on invalid smp_cpus/max_cpus vs threads-per-core combinations.
 */
static void spapr_init_cpus(SpaprMachineState *spapr)
{
    MachineState *machine = MACHINE(spapr);
    MachineClass *mc = MACHINE_GET_CLASS(machine);
    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);
    const char *type = spapr_get_cpu_core_type(machine->cpu_type);
    const CPUArchIdList *possible_cpus;
    unsigned int smp_cpus = machine->smp.cpus;
    unsigned int smp_threads = machine->smp.threads;
    unsigned int max_cpus = machine->smp.max_cpus;
    int boot_cores_nr = smp_cpus / smp_threads;
    int i;

    possible_cpus = mc->possible_cpu_arch_ids(machine);
    if (mc->has_hotpluggable_cpus) {
        if (smp_cpus % smp_threads) {
            error_report("smp_cpus (%u) must be multiple of threads (%u)",
                         smp_cpus, smp_threads);
            exit(1);
        }
        if (max_cpus % smp_threads) {
            error_report("max_cpus (%u) must be multiple of threads (%u)",
                         max_cpus, smp_threads);
            exit(1);
        }
    } else {
        if (max_cpus != smp_cpus) {
            error_report("This machine version does not support CPU hotplug");
            exit(1);
        }
        boot_cores_nr = possible_cpus->len;
    }

    if (smc->pre_2_10_has_unused_icps) {
        int i;

        for (i = 0; i < spapr_max_server_number(spapr); i++) {
            /* Dummy entries get deregistered when real ICPState objects
             * are registered during CPU core hotplug.
             */
            pre_2_10_vmstate_register_dummy_icp(i);
        }
    }

    /* Create a DR connector per possible core and realize the boot cores */
    for (i = 0; i < possible_cpus->len; i++) {
        int core_id = i * smp_threads;

        if (mc->has_hotpluggable_cpus) {
            spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_CPU,
                                   spapr_vcpu_id(spapr, core_id));
        }

        if (i < boot_cores_nr) {
            Object *core  = object_new(type);
            int nr_threads = smp_threads;

            /* Handle the partially filled core for older machine types */
            if ((i + 1) * smp_threads >= smp_cpus) {
                nr_threads = smp_cpus - i * smp_threads;
            }

            object_property_set_int(core, nr_threads, "nr-threads",
                                    &error_fatal);
            object_property_set_int(core, core_id, CPU_CORE_PROP_CORE_ID,
                                    &error_fatal);
            object_property_set_bool(core, true, "realized", &error_fatal);

            /* Drop our reference; the realized core is kept alive elsewhere */
            object_unref(core);
        }
    }
}

/* Create the default PCI host bridge (index 0) for the machine */
static PCIHostState *spapr_create_default_phb(void)
{
    DeviceState *dev;

    dev = qdev_create(NULL, TYPE_SPAPR_PCI_HOST_BRIDGE);
    qdev_prop_set_uint32(dev, "index", 0);
    qdev_init_nofail(dev);

    return PCI_HOST_BRIDGE(dev);
}

/* pSeries LPAR /
sPAPR hardware init */ 2581bcb5ce08SDavid Gibson static void spapr_machine_init(MachineState *machine) 258253018216SPaolo Bonzini { 2583ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(machine); 2584ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine); 25853ef96221SMarcel Apfelbaum const char *kernel_filename = machine->kernel_filename; 25863ef96221SMarcel Apfelbaum const char *initrd_filename = machine->initrd_filename; 258753018216SPaolo Bonzini PCIHostState *phb; 258853018216SPaolo Bonzini int i; 258953018216SPaolo Bonzini MemoryRegion *sysmem = get_system_memory(); 259053018216SPaolo Bonzini MemoryRegion *ram = g_new(MemoryRegion, 1); 2591c86c1affSDaniel Henrique Barboza hwaddr node0_size = spapr_node0_size(machine); 2592b7d1f77aSBenjamin Herrenschmidt long load_limit, fw_size; 259353018216SPaolo Bonzini char *filename; 259430f4b05bSDavid Gibson Error *resize_hpt_err = NULL; 259553018216SPaolo Bonzini 2596226419d6SMichael S. Tsirkin msi_nonbroken = true; 259753018216SPaolo Bonzini 259853018216SPaolo Bonzini QLIST_INIT(&spapr->phbs); 25990cffce56SDavid Gibson QTAILQ_INIT(&spapr->pending_dimm_unplugs); 260053018216SPaolo Bonzini 26019f6edd06SDavid Gibson /* Determine capabilities to run with */ 26029f6edd06SDavid Gibson spapr_caps_init(spapr); 26039f6edd06SDavid Gibson 260430f4b05bSDavid Gibson kvmppc_check_papr_resize_hpt(&resize_hpt_err); 260530f4b05bSDavid Gibson if (spapr->resize_hpt == SPAPR_RESIZE_HPT_DEFAULT) { 260630f4b05bSDavid Gibson /* 260730f4b05bSDavid Gibson * If the user explicitly requested a mode we should either 260830f4b05bSDavid Gibson * supply it, or fail completely (which we do below). 
But if 260930f4b05bSDavid Gibson * it's not set explicitly, we reset our mode to something 261030f4b05bSDavid Gibson * that works 261130f4b05bSDavid Gibson */ 261230f4b05bSDavid Gibson if (resize_hpt_err) { 261330f4b05bSDavid Gibson spapr->resize_hpt = SPAPR_RESIZE_HPT_DISABLED; 261430f4b05bSDavid Gibson error_free(resize_hpt_err); 261530f4b05bSDavid Gibson resize_hpt_err = NULL; 261630f4b05bSDavid Gibson } else { 261730f4b05bSDavid Gibson spapr->resize_hpt = smc->resize_hpt_default; 261830f4b05bSDavid Gibson } 261930f4b05bSDavid Gibson } 262030f4b05bSDavid Gibson 262130f4b05bSDavid Gibson assert(spapr->resize_hpt != SPAPR_RESIZE_HPT_DEFAULT); 262230f4b05bSDavid Gibson 262330f4b05bSDavid Gibson if ((spapr->resize_hpt != SPAPR_RESIZE_HPT_DISABLED) && resize_hpt_err) { 262430f4b05bSDavid Gibson /* 262530f4b05bSDavid Gibson * User requested HPT resize, but this host can't supply it. Bail out 262630f4b05bSDavid Gibson */ 262730f4b05bSDavid Gibson error_report_err(resize_hpt_err); 262830f4b05bSDavid Gibson exit(1); 262930f4b05bSDavid Gibson } 263030f4b05bSDavid Gibson 2631c4177479SAlexey Kardashevskiy spapr->rma_size = node0_size; 263253018216SPaolo Bonzini 263353018216SPaolo Bonzini /* With KVM, we don't actually know whether KVM supports an 263453018216SPaolo Bonzini * unbounded RMA (PR KVM) or is limited by the hash table size 263553018216SPaolo Bonzini * (HV KVM using VRMA), so we always assume the latter 263653018216SPaolo Bonzini * 263753018216SPaolo Bonzini * In that case, we also limit the initial allocations for RTAS 263853018216SPaolo Bonzini * etc... to 256M since we have no way to know what the VRMA size 263953018216SPaolo Bonzini * is going to be as it depends on the size of the hash table 2640090052aaSDavid Gibson * which isn't determined yet. 
264153018216SPaolo Bonzini */ 264253018216SPaolo Bonzini if (kvm_enabled()) { 264353018216SPaolo Bonzini spapr->vrma_adjust = 1; 264453018216SPaolo Bonzini spapr->rma_size = MIN(spapr->rma_size, 0x10000000); 264553018216SPaolo Bonzini } 2646912acdf4SBenjamin Herrenschmidt 2647090052aaSDavid Gibson /* Actually we don't support unbounded RMA anymore since we added 2648090052aaSDavid Gibson * proper emulation of HV mode. The max we can get is 16G which 2649090052aaSDavid Gibson * also happens to be what we configure for PAPR mode so make sure 2650090052aaSDavid Gibson * we don't do anything bigger than that 2651912acdf4SBenjamin Herrenschmidt */ 2652912acdf4SBenjamin Herrenschmidt spapr->rma_size = MIN(spapr->rma_size, 0x400000000ull); 265353018216SPaolo Bonzini 2654c4177479SAlexey Kardashevskiy if (spapr->rma_size > node0_size) { 2655d54e4d76SDavid Gibson error_report("Numa node 0 has to span the RMA (%#08"HWADDR_PRIx")", 2656c4177479SAlexey Kardashevskiy spapr->rma_size); 2657c4177479SAlexey Kardashevskiy exit(1); 2658c4177479SAlexey Kardashevskiy } 2659c4177479SAlexey Kardashevskiy 2660b7d1f77aSBenjamin Herrenschmidt /* Setup a load limit for the ramdisk leaving room for SLOF and FDT */ 2661b7d1f77aSBenjamin Herrenschmidt load_limit = MIN(spapr->rma_size, RTAS_MAX_ADDR) - FW_OVERHEAD; 266253018216SPaolo Bonzini 2663482969d6SCédric Le Goater /* 2664482969d6SCédric Le Goater * VSMT must be set in order to be able to compute VCPU ids, ie to 26651a518e76SCédric Le Goater * call spapr_max_server_number() or spapr_vcpu_id(). 
2666482969d6SCédric Le Goater */ 2667482969d6SCédric Le Goater spapr_set_vsmt_mode(spapr, &error_fatal); 2668482969d6SCédric Le Goater 26697b565160SDavid Gibson /* Set up Interrupt Controller before we create the VCPUs */ 2670fab397d8SCédric Le Goater spapr_irq_init(spapr, &error_fatal); 26717b565160SDavid Gibson 2672dc1b5eeeSGreg Kurz /* Set up containers for ibm,client-architecture-support negotiated options 2673dc1b5eeeSGreg Kurz */ 2674facdb8b6SMichael Roth spapr->ov5 = spapr_ovec_new(); 2675facdb8b6SMichael Roth spapr->ov5_cas = spapr_ovec_new(); 2676facdb8b6SMichael Roth 2677224245bfSDavid Gibson if (smc->dr_lmb_enabled) { 2678facdb8b6SMichael Roth spapr_ovec_set(spapr->ov5, OV5_DRCONF_MEMORY); 26797c150d6fSDavid Gibson spapr_validate_node_memory(machine, &error_fatal); 2680224245bfSDavid Gibson } 2681224245bfSDavid Gibson 2682417ece33SMichael Roth spapr_ovec_set(spapr->ov5, OV5_FORM1_AFFINITY); 2683417ece33SMichael Roth 2684ffbb1705SMichael Roth /* advertise support for dedicated HP event source to guests */ 2685ffbb1705SMichael Roth if (spapr->use_hotplug_event_source) { 2686ffbb1705SMichael Roth spapr_ovec_set(spapr->ov5, OV5_HP_EVT); 2687ffbb1705SMichael Roth } 2688ffbb1705SMichael Roth 26892772cf6bSDavid Gibson /* advertise support for HPT resizing */ 26902772cf6bSDavid Gibson if (spapr->resize_hpt != SPAPR_RESIZE_HPT_DISABLED) { 26912772cf6bSDavid Gibson spapr_ovec_set(spapr->ov5, OV5_HPT_RESIZE); 26922772cf6bSDavid Gibson } 26932772cf6bSDavid Gibson 2694a324d6f1SBharata B Rao /* advertise support for ibm,dyamic-memory-v2 */ 2695a324d6f1SBharata B Rao spapr_ovec_set(spapr->ov5, OV5_DRMEM_V2); 2696a324d6f1SBharata B Rao 2697db592b5bSCédric Le Goater /* advertise XIVE on POWER9 machines */ 2698ca62823bSDavid Gibson if (spapr->irq->xive) { 2699db592b5bSCédric Le Goater spapr_ovec_set(spapr->ov5, OV5_XIVE_EXPLOIT); 2700db592b5bSCédric Le Goater } 2701db592b5bSCédric Le Goater 270253018216SPaolo Bonzini /* init CPUs */ 27030c86d0fdSDavid Gibson 
spapr_init_cpus(spapr); 270453018216SPaolo Bonzini 270558c46efaSLaurent Vivier /* 270658c46efaSLaurent Vivier * check we don't have a memory-less/cpu-less NUMA node 270758c46efaSLaurent Vivier * Firmware relies on the existing memory/cpu topology to provide the 270858c46efaSLaurent Vivier * NUMA topology to the kernel. 270958c46efaSLaurent Vivier * And the linux kernel needs to know the NUMA topology at start 271058c46efaSLaurent Vivier * to be able to hotplug CPUs later. 271158c46efaSLaurent Vivier */ 271258c46efaSLaurent Vivier if (machine->numa_state->num_nodes) { 271358c46efaSLaurent Vivier for (i = 0; i < machine->numa_state->num_nodes; ++i) { 271458c46efaSLaurent Vivier /* check for memory-less node */ 271558c46efaSLaurent Vivier if (machine->numa_state->nodes[i].node_mem == 0) { 271658c46efaSLaurent Vivier CPUState *cs; 271758c46efaSLaurent Vivier int found = 0; 271858c46efaSLaurent Vivier /* check for cpu-less node */ 271958c46efaSLaurent Vivier CPU_FOREACH(cs) { 272058c46efaSLaurent Vivier PowerPCCPU *cpu = POWERPC_CPU(cs); 272158c46efaSLaurent Vivier if (cpu->node_id == i) { 272258c46efaSLaurent Vivier found = 1; 272358c46efaSLaurent Vivier break; 272458c46efaSLaurent Vivier } 272558c46efaSLaurent Vivier } 272658c46efaSLaurent Vivier /* memory-less and cpu-less node */ 272758c46efaSLaurent Vivier if (!found) { 272858c46efaSLaurent Vivier error_report( 272958c46efaSLaurent Vivier "Memory-less/cpu-less nodes are not supported (node %d)", 273058c46efaSLaurent Vivier i); 273158c46efaSLaurent Vivier exit(1); 273258c46efaSLaurent Vivier } 273358c46efaSLaurent Vivier } 273458c46efaSLaurent Vivier } 273558c46efaSLaurent Vivier 273658c46efaSLaurent Vivier } 273758c46efaSLaurent Vivier 2738db5127b2SDavid Gibson /* 2739db5127b2SDavid Gibson * NVLink2-connected GPU RAM needs to be placed on a separate NUMA node. 
2740db5127b2SDavid Gibson * We assign a new numa ID per GPU in spapr_pci_collect_nvgpu() which is 2741db5127b2SDavid Gibson * called from vPHB reset handler so we initialize the counter here. 2742db5127b2SDavid Gibson * If no NUMA is configured from the QEMU side, we start from 1 as GPU RAM 2743db5127b2SDavid Gibson * must be equally distant from any other node. 2744db5127b2SDavid Gibson * The final value of spapr->gpu_numa_id is going to be written to 2745db5127b2SDavid Gibson * max-associativity-domains in spapr_build_fdt(). 2746db5127b2SDavid Gibson */ 2747db5127b2SDavid Gibson spapr->gpu_numa_id = MAX(1, machine->numa_state->num_nodes); 2748db5127b2SDavid Gibson 27490550b120SGreg Kurz if ((!kvm_enabled() || kvmppc_has_cap_mmu_radix()) && 2750ad99d04cSDavid Gibson ppc_type_check_compat(machine->cpu_type, CPU_POWERPC_LOGICAL_3_00, 0, 27510550b120SGreg Kurz spapr->max_compat_pvr)) { 27520550b120SGreg Kurz /* KVM and TCG always allow GTSE with radix... */ 27530550b120SGreg Kurz spapr_ovec_set(spapr->ov5, OV5_MMU_RADIX_GTSE); 27540550b120SGreg Kurz } 27550550b120SGreg Kurz /* ... but not with hash (currently). 
*/ 27560550b120SGreg Kurz 2757026bfd89SDavid Gibson if (kvm_enabled()) { 2758026bfd89SDavid Gibson /* Enable H_LOGICAL_CI_* so SLOF can talk to in-kernel devices */ 2759026bfd89SDavid Gibson kvmppc_enable_logical_ci_hcalls(); 2760ef9971ddSAlexey Kardashevskiy kvmppc_enable_set_mode_hcall(); 27615145ad4fSNathan Whitehorn 27625145ad4fSNathan Whitehorn /* H_CLEAR_MOD/_REF are mandatory in PAPR, but off by default */ 27635145ad4fSNathan Whitehorn kvmppc_enable_clear_ref_mod_hcalls(); 276468f9f708SSuraj Jitindar Singh 276568f9f708SSuraj Jitindar Singh /* Enable H_PAGE_INIT */ 276668f9f708SSuraj Jitindar Singh kvmppc_enable_h_page_init(); 2767026bfd89SDavid Gibson } 2768026bfd89SDavid Gibson 276953018216SPaolo Bonzini /* allocate RAM */ 2770f92f5da1SAlexey Kardashevskiy memory_region_allocate_system_memory(ram, NULL, "ppc_spapr.ram", 2771fb164994SDavid Gibson machine->ram_size); 2772f92f5da1SAlexey Kardashevskiy memory_region_add_subregion(sysmem, 0, ram); 277353018216SPaolo Bonzini 2774b0c14ec4SDavid Hildenbrand /* always allocate the device memory information */ 2775b0c14ec4SDavid Hildenbrand machine->device_memory = g_malloc0(sizeof(*machine->device_memory)); 2776b0c14ec4SDavid Hildenbrand 27774a1c9cf0SBharata B Rao /* initialize hotplug memory address space */ 27784a1c9cf0SBharata B Rao if (machine->ram_size < machine->maxram_size) { 27790c9269a5SDavid Hildenbrand ram_addr_t device_mem_size = machine->maxram_size - machine->ram_size; 278071c9a3ddSBharata B Rao /* 278171c9a3ddSBharata B Rao * Limit the number of hotpluggable memory slots to half the number 278271c9a3ddSBharata B Rao * slots that KVM supports, leaving the other half for PCI and other 278371c9a3ddSBharata B Rao * devices. However ensure that number of slots doesn't drop below 32. 278471c9a3ddSBharata B Rao */ 278571c9a3ddSBharata B Rao int max_memslots = kvm_enabled() ? 
kvm_get_max_memslots() / 2 : 278671c9a3ddSBharata B Rao SPAPR_MAX_RAM_SLOTS; 27874a1c9cf0SBharata B Rao 278871c9a3ddSBharata B Rao if (max_memslots < SPAPR_MAX_RAM_SLOTS) { 278971c9a3ddSBharata B Rao max_memslots = SPAPR_MAX_RAM_SLOTS; 279071c9a3ddSBharata B Rao } 279171c9a3ddSBharata B Rao if (machine->ram_slots > max_memslots) { 2792d54e4d76SDavid Gibson error_report("Specified number of memory slots %" 2793d54e4d76SDavid Gibson PRIu64" exceeds max supported %d", 279471c9a3ddSBharata B Rao machine->ram_slots, max_memslots); 2795d54e4d76SDavid Gibson exit(1); 27964a1c9cf0SBharata B Rao } 27974a1c9cf0SBharata B Rao 2798b0c14ec4SDavid Hildenbrand machine->device_memory->base = ROUND_UP(machine->ram_size, 27990c9269a5SDavid Hildenbrand SPAPR_DEVICE_MEM_ALIGN); 2800b0c14ec4SDavid Hildenbrand memory_region_init(&machine->device_memory->mr, OBJECT(spapr), 28010c9269a5SDavid Hildenbrand "device-memory", device_mem_size); 2802b0c14ec4SDavid Hildenbrand memory_region_add_subregion(sysmem, machine->device_memory->base, 2803b0c14ec4SDavid Hildenbrand &machine->device_memory->mr); 28044a1c9cf0SBharata B Rao } 28054a1c9cf0SBharata B Rao 2806224245bfSDavid Gibson if (smc->dr_lmb_enabled) { 2807224245bfSDavid Gibson spapr_create_lmb_dr_connectors(spapr); 2808224245bfSDavid Gibson } 2809224245bfSDavid Gibson 2810ffbb1705SMichael Roth /* Set up RTAS event infrastructure */ 281153018216SPaolo Bonzini spapr_events_init(spapr); 281253018216SPaolo Bonzini 281312f42174SDavid Gibson /* Set up the RTC RTAS interfaces */ 281428df36a1SDavid Gibson spapr_rtc_create(spapr); 281512f42174SDavid Gibson 281653018216SPaolo Bonzini /* Set up VIO bus */ 281753018216SPaolo Bonzini spapr->vio_bus = spapr_vio_bus_init(); 281853018216SPaolo Bonzini 2819b8846a4dSPeter Maydell for (i = 0; i < serial_max_hds(); i++) { 28209bca0edbSPeter Maydell if (serial_hd(i)) { 28219bca0edbSPeter Maydell spapr_vty_create(spapr->vio_bus, serial_hd(i)); 282253018216SPaolo Bonzini } 282353018216SPaolo Bonzini } 
282453018216SPaolo Bonzini 282553018216SPaolo Bonzini /* We always have at least the nvram device on VIO */ 282653018216SPaolo Bonzini spapr_create_nvram(spapr); 282753018216SPaolo Bonzini 2828962b6c36SMichael Roth /* 2829962b6c36SMichael Roth * Setup hotplug / dynamic-reconfiguration connectors. top-level 2830962b6c36SMichael Roth * connectors (described in root DT node's "ibm,drc-types" property) 2831962b6c36SMichael Roth * are pre-initialized here. additional child connectors (such as 2832962b6c36SMichael Roth * connectors for a PHBs PCI slots) are added as needed during their 2833962b6c36SMichael Roth * parent's realization. 2834962b6c36SMichael Roth */ 2835962b6c36SMichael Roth if (smc->dr_phb_enabled) { 2836962b6c36SMichael Roth for (i = 0; i < SPAPR_MAX_PHBS; i++) { 2837962b6c36SMichael Roth spapr_dr_connector_new(OBJECT(machine), TYPE_SPAPR_DRC_PHB, i); 2838962b6c36SMichael Roth } 2839962b6c36SMichael Roth } 2840962b6c36SMichael Roth 284153018216SPaolo Bonzini /* Set up PCI */ 284253018216SPaolo Bonzini spapr_pci_rtas_init(); 284353018216SPaolo Bonzini 2844999c9cafSGreg Kurz phb = spapr_create_default_phb(); 284553018216SPaolo Bonzini 284653018216SPaolo Bonzini for (i = 0; i < nb_nics; i++) { 284753018216SPaolo Bonzini NICInfo *nd = &nd_table[i]; 284853018216SPaolo Bonzini 284953018216SPaolo Bonzini if (!nd->model) { 28503c3a4e7aSThomas Huth nd->model = g_strdup("spapr-vlan"); 285153018216SPaolo Bonzini } 285253018216SPaolo Bonzini 28533c3a4e7aSThomas Huth if (g_str_equal(nd->model, "spapr-vlan") || 28543c3a4e7aSThomas Huth g_str_equal(nd->model, "ibmveth")) { 285553018216SPaolo Bonzini spapr_vlan_create(spapr->vio_bus, nd); 285653018216SPaolo Bonzini } else { 285729b358f9SDavid Gibson pci_nic_init_nofail(&nd_table[i], phb->bus, nd->model, NULL); 285853018216SPaolo Bonzini } 285953018216SPaolo Bonzini } 286053018216SPaolo Bonzini 286153018216SPaolo Bonzini for (i = 0; i <= drive_get_max_bus(IF_SCSI); i++) { 286253018216SPaolo Bonzini 
spapr_vscsi_create(spapr->vio_bus); 286353018216SPaolo Bonzini } 286453018216SPaolo Bonzini 286553018216SPaolo Bonzini /* Graphics */ 286614c6a894SDavid Gibson if (spapr_vga_init(phb->bus, &error_fatal)) { 286753018216SPaolo Bonzini spapr->has_graphics = true; 2868c6e76503SPaolo Bonzini machine->usb |= defaults_enabled() && !machine->usb_disabled; 286953018216SPaolo Bonzini } 287053018216SPaolo Bonzini 28714ee9ced9SMarcel Apfelbaum if (machine->usb) { 287257040d45SThomas Huth if (smc->use_ohci_by_default) { 287353018216SPaolo Bonzini pci_create_simple(phb->bus, -1, "pci-ohci"); 287457040d45SThomas Huth } else { 287557040d45SThomas Huth pci_create_simple(phb->bus, -1, "nec-usb-xhci"); 287657040d45SThomas Huth } 2877c86580b8SMarkus Armbruster 287853018216SPaolo Bonzini if (spapr->has_graphics) { 2879c86580b8SMarkus Armbruster USBBus *usb_bus = usb_bus_find(-1); 2880c86580b8SMarkus Armbruster 2881c86580b8SMarkus Armbruster usb_create_simple(usb_bus, "usb-kbd"); 2882c86580b8SMarkus Armbruster usb_create_simple(usb_bus, "usb-mouse"); 288353018216SPaolo Bonzini } 288453018216SPaolo Bonzini } 288553018216SPaolo Bonzini 2886ab3dd749SPhilippe Mathieu-Daudé if (spapr->rma_size < (MIN_RMA_SLOF * MiB)) { 2887d54e4d76SDavid Gibson error_report( 2888d54e4d76SDavid Gibson "pSeries SLOF firmware requires >= %ldM guest RMA (Real Mode Area memory)", 2889d54e4d76SDavid Gibson MIN_RMA_SLOF); 289053018216SPaolo Bonzini exit(1); 289153018216SPaolo Bonzini } 289253018216SPaolo Bonzini 289353018216SPaolo Bonzini if (kernel_filename) { 289453018216SPaolo Bonzini uint64_t lowaddr = 0; 289553018216SPaolo Bonzini 28964366e1dbSLiam Merwick spapr->kernel_size = load_elf(kernel_filename, NULL, 28974366e1dbSLiam Merwick translate_kernel_address, NULL, 2898*6cdda0ffSAleksandar Markovic NULL, &lowaddr, NULL, NULL, 1, 2899a19f7fb0SDavid Gibson PPC_ELF_MACHINE, 0, 0); 2900a19f7fb0SDavid Gibson if (spapr->kernel_size == ELF_LOAD_WRONG_ENDIAN) { 29014366e1dbSLiam Merwick spapr->kernel_size = 
load_elf(kernel_filename, NULL, 2902a19f7fb0SDavid Gibson translate_kernel_address, NULL, NULL, 2903*6cdda0ffSAleksandar Markovic &lowaddr, NULL, NULL, 0, 2904*6cdda0ffSAleksandar Markovic PPC_ELF_MACHINE, 0, 0); 2905a19f7fb0SDavid Gibson spapr->kernel_le = spapr->kernel_size > 0; 290616457e7fSBenjamin Herrenschmidt } 2907a19f7fb0SDavid Gibson if (spapr->kernel_size < 0) { 2908a19f7fb0SDavid Gibson error_report("error loading %s: %s", kernel_filename, 2909a19f7fb0SDavid Gibson load_elf_strerror(spapr->kernel_size)); 291053018216SPaolo Bonzini exit(1); 291153018216SPaolo Bonzini } 291253018216SPaolo Bonzini 291353018216SPaolo Bonzini /* load initrd */ 291453018216SPaolo Bonzini if (initrd_filename) { 291553018216SPaolo Bonzini /* Try to locate the initrd in the gap between the kernel 291653018216SPaolo Bonzini * and the firmware. Add a bit of space just in case 291753018216SPaolo Bonzini */ 2918a19f7fb0SDavid Gibson spapr->initrd_base = (KERNEL_LOAD_ADDR + spapr->kernel_size 2919a19f7fb0SDavid Gibson + 0x1ffff) & ~0xffff; 2920a19f7fb0SDavid Gibson spapr->initrd_size = load_image_targphys(initrd_filename, 2921a19f7fb0SDavid Gibson spapr->initrd_base, 2922a19f7fb0SDavid Gibson load_limit 2923a19f7fb0SDavid Gibson - spapr->initrd_base); 2924a19f7fb0SDavid Gibson if (spapr->initrd_size < 0) { 2925d54e4d76SDavid Gibson error_report("could not load initial ram disk '%s'", 292653018216SPaolo Bonzini initrd_filename); 292753018216SPaolo Bonzini exit(1); 292853018216SPaolo Bonzini } 292953018216SPaolo Bonzini } 293053018216SPaolo Bonzini } 293153018216SPaolo Bonzini 29328e7ea787SAndreas Färber if (bios_name == NULL) { 29338e7ea787SAndreas Färber bios_name = FW_FILE_NAME; 29348e7ea787SAndreas Färber } 29358e7ea787SAndreas Färber filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); 29364c56440dSStefan Weil if (!filename) { 293768fea5a0SThomas Huth error_report("Could not find LPAR firmware '%s'", bios_name); 29384c56440dSStefan Weil exit(1); 29394c56440dSStefan Weil } 
294053018216SPaolo Bonzini fw_size = load_image_targphys(filename, 0, FW_MAX_SIZE); 294168fea5a0SThomas Huth if (fw_size <= 0) { 294268fea5a0SThomas Huth error_report("Could not load LPAR firmware '%s'", filename); 294353018216SPaolo Bonzini exit(1); 294453018216SPaolo Bonzini } 294553018216SPaolo Bonzini g_free(filename); 294653018216SPaolo Bonzini 294728e02042SDavid Gibson /* FIXME: Should register things through the MachineState's qdev 294828e02042SDavid Gibson * interface, this is a legacy from the sPAPREnvironment structure 294928e02042SDavid Gibson * which predated MachineState but had a similar function */ 29504be21d56SDavid Gibson vmstate_register(NULL, 0, &vmstate_spapr, spapr); 29511df2c9a2SPeter Xu register_savevm_live("spapr/htab", VMSTATE_INSTANCE_ID_ANY, 1, 29524be21d56SDavid Gibson &savevm_htab_handlers, spapr); 29534be21d56SDavid Gibson 2954bb2bdd81SGreg Kurz qbus_set_hotplug_handler(sysbus_get_default(), OBJECT(machine), 2955bb2bdd81SGreg Kurz &error_fatal); 2956bb2bdd81SGreg Kurz 29575b2128d2SAlexander Graf qemu_register_boot_set(spapr_boot_set, spapr); 295842043e4fSLaurent Vivier 295993eac7b8SNicholas Piggin /* 296093eac7b8SNicholas Piggin * Nothing needs to be done to resume a suspended guest because 296193eac7b8SNicholas Piggin * suspending does not change the machine state, so no need for 296293eac7b8SNicholas Piggin * a ->wakeup method. 
     */
    qemu_register_wakeup_support();

    if (kvm_enabled()) {
        /* to stop and start vmclock */
        qemu_add_vm_change_state_handler(cpu_ppc_clock_vm_state_change,
                                         &spapr->tb);

        kvmppc_spapr_enable_inkernel_multitce();
    }
}

/*
 * Map the "kvm-type" machine option string to QEMU's numeric KVM VM
 * type: 0 for the default (NULL), 1 for "HV", 2 for "PR".  Any other
 * string is a fatal error.
 */
static int spapr_kvm_type(MachineState *machine, const char *vm_type)
{
    if (!vm_type) {
        return 0;
    }

    if (!strcmp(vm_type, "HV")) {
        return 1;
    }

    if (!strcmp(vm_type, "PR")) {
        return 2;
    }

    error_report("Unknown kvm-type specified '%s'", vm_type);
    exit(1);
}

/*
 * Implementation of an interface to adjust firmware path
 * for the bootindex property handling.
 */
static char *spapr_get_fw_dev_path(FWPathProvider *p, BusState *bus,
                                   DeviceState *dev)
{
#define CAST(type, obj, name) \
    ((type *)object_dynamic_cast(OBJECT(obj), (name)))
    SCSIDevice *d = CAST(SCSIDevice,  dev, TYPE_SCSI_DEVICE);
    SpaprPhbState *phb = CAST(SpaprPhbState, dev, TYPE_SPAPR_PCI_HOST_BRIDGE);
    VHostSCSICommon *vsc = CAST(VHostSCSICommon, dev, TYPE_VHOST_SCSI_COMMON);

    if (d) {
        /* A SCSI device: encode its LUN the way SLOF expects for its bus */
        void *spapr = CAST(void, bus->parent, "spapr-vscsi");
        VirtIOSCSI *virtio = CAST(VirtIOSCSI, bus->parent, TYPE_VIRTIO_SCSI);
        USBDevice *usb = CAST(USBDevice, bus->parent, TYPE_USB_DEVICE);

        if (spapr) {
            /*
             * Replace "channel@0/disk@0,0" with "disk@8000000000000000":
             * In the top 16 bits of the 64-bit LUN, we use SRP luns of the form
             * 0x8000 | (target << 8) | (bus << 5) | lun
             * (see the "Logical unit addressing format" table in SAM5)
             */
            unsigned id = 0x8000 | (d->id << 8) | (d->channel << 5) | d->lun;
            return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
                                   (uint64_t)id << 48);
        } else if (virtio) {
            /*
             * We use SRP luns of the form 01000000 | (target << 8) | lun
             * in the top 32 bits of the 64-bit LUN
             * Note: the quote above is from SLOF and it is wrong,
             * the actual binding is:
             * swap 0100 or 10 << or 20 << ( target lun-id -- srplun )
             */
            unsigned id = 0x1000000 | (d->id << 16) | d->lun;
            if (d->lun >= 256) {
                /* Use the LUN "flat space addressing method" */
                id |= 0x4000;
            }
            return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
                                   (uint64_t)id << 32);
        } else if (usb) {
            /*
             * We use SRP luns of the form 01000000 | (usb-port << 16) | lun
             * in the top 32 bits of the 64-bit LUN
             */
            unsigned usb_port = atoi(usb->port->path);
            unsigned id = 0x1000000 | (usb_port << 16) | d->lun;
            return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
                                   (uint64_t)id << 32);
        }
    }

    /*
     * SLOF probes the USB devices, and if it recognizes that the device is a
     * storage device, it changes its name to "storage" instead of "usb-host",
     * and additionally adds a child node for the SCSI LUN, so the correct
     * boot path in SLOF is something like .../storage@1/disk@xxx" instead.
3053b99260ebSThomas Huth */ 3054b99260ebSThomas Huth if (strcmp("usb-host", qdev_fw_name(dev)) == 0) { 3055b99260ebSThomas Huth USBDevice *usbdev = CAST(USBDevice, dev, TYPE_USB_DEVICE); 3056b99260ebSThomas Huth if (usb_host_dev_is_scsi_storage(usbdev)) { 3057b99260ebSThomas Huth return g_strdup_printf("storage@%s/disk", usbdev->port->path); 3058b99260ebSThomas Huth } 3059b99260ebSThomas Huth } 3060b99260ebSThomas Huth 306171461b0fSAlexey Kardashevskiy if (phb) { 306271461b0fSAlexey Kardashevskiy /* Replace "pci" with "pci@800000020000000" */ 306371461b0fSAlexey Kardashevskiy return g_strdup_printf("pci@%"PRIX64, phb->buid); 306471461b0fSAlexey Kardashevskiy } 306571461b0fSAlexey Kardashevskiy 3066c4e13492SFelipe Franciosi if (vsc) { 3067c4e13492SFelipe Franciosi /* Same logic as virtio above */ 3068c4e13492SFelipe Franciosi unsigned id = 0x1000000 | (vsc->target << 16) | vsc->lun; 3069c4e13492SFelipe Franciosi return g_strdup_printf("disk@%"PRIX64, (uint64_t)id << 32); 3070c4e13492SFelipe Franciosi } 3071c4e13492SFelipe Franciosi 30724871dd4cSThomas Huth if (g_str_equal("pci-bridge", qdev_fw_name(dev))) { 30734871dd4cSThomas Huth /* SLOF uses "pci" instead of "pci-bridge" for PCI bridges */ 30744871dd4cSThomas Huth PCIDevice *pcidev = CAST(PCIDevice, dev, TYPE_PCI_DEVICE); 30754871dd4cSThomas Huth return g_strdup_printf("pci@%x", PCI_SLOT(pcidev->devfn)); 30764871dd4cSThomas Huth } 30774871dd4cSThomas Huth 307871461b0fSAlexey Kardashevskiy return NULL; 307971461b0fSAlexey Kardashevskiy } 308071461b0fSAlexey Kardashevskiy 308123825581SEduardo Habkost static char *spapr_get_kvm_type(Object *obj, Error **errp) 308223825581SEduardo Habkost { 3083ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(obj); 308423825581SEduardo Habkost 308528e02042SDavid Gibson return g_strdup(spapr->kvm_type); 308623825581SEduardo Habkost } 308723825581SEduardo Habkost 308823825581SEduardo Habkost static void spapr_set_kvm_type(Object *obj, const char *value, Error **errp) 
308923825581SEduardo Habkost { 3090ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(obj); 309123825581SEduardo Habkost 309228e02042SDavid Gibson g_free(spapr->kvm_type); 309328e02042SDavid Gibson spapr->kvm_type = g_strdup(value); 309423825581SEduardo Habkost } 309523825581SEduardo Habkost 3096f6229214SMichael Roth static bool spapr_get_modern_hotplug_events(Object *obj, Error **errp) 3097f6229214SMichael Roth { 3098ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(obj); 3099f6229214SMichael Roth 3100f6229214SMichael Roth return spapr->use_hotplug_event_source; 3101f6229214SMichael Roth } 3102f6229214SMichael Roth 3103f6229214SMichael Roth static void spapr_set_modern_hotplug_events(Object *obj, bool value, 3104f6229214SMichael Roth Error **errp) 3105f6229214SMichael Roth { 3106ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(obj); 3107f6229214SMichael Roth 3108f6229214SMichael Roth spapr->use_hotplug_event_source = value; 3109f6229214SMichael Roth } 3110f6229214SMichael Roth 3111fcad0d21SAlexey Kardashevskiy static bool spapr_get_msix_emulation(Object *obj, Error **errp) 3112fcad0d21SAlexey Kardashevskiy { 3113fcad0d21SAlexey Kardashevskiy return true; 3114fcad0d21SAlexey Kardashevskiy } 3115fcad0d21SAlexey Kardashevskiy 311630f4b05bSDavid Gibson static char *spapr_get_resize_hpt(Object *obj, Error **errp) 311730f4b05bSDavid Gibson { 3118ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(obj); 311930f4b05bSDavid Gibson 312030f4b05bSDavid Gibson switch (spapr->resize_hpt) { 312130f4b05bSDavid Gibson case SPAPR_RESIZE_HPT_DEFAULT: 312230f4b05bSDavid Gibson return g_strdup("default"); 312330f4b05bSDavid Gibson case SPAPR_RESIZE_HPT_DISABLED: 312430f4b05bSDavid Gibson return g_strdup("disabled"); 312530f4b05bSDavid Gibson case SPAPR_RESIZE_HPT_ENABLED: 312630f4b05bSDavid Gibson return g_strdup("enabled"); 312730f4b05bSDavid Gibson case SPAPR_RESIZE_HPT_REQUIRED: 312830f4b05bSDavid Gibson return g_strdup("required"); 
312930f4b05bSDavid Gibson } 313030f4b05bSDavid Gibson g_assert_not_reached(); 313130f4b05bSDavid Gibson } 313230f4b05bSDavid Gibson 313330f4b05bSDavid Gibson static void spapr_set_resize_hpt(Object *obj, const char *value, Error **errp) 313430f4b05bSDavid Gibson { 3135ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(obj); 313630f4b05bSDavid Gibson 313730f4b05bSDavid Gibson if (strcmp(value, "default") == 0) { 313830f4b05bSDavid Gibson spapr->resize_hpt = SPAPR_RESIZE_HPT_DEFAULT; 313930f4b05bSDavid Gibson } else if (strcmp(value, "disabled") == 0) { 314030f4b05bSDavid Gibson spapr->resize_hpt = SPAPR_RESIZE_HPT_DISABLED; 314130f4b05bSDavid Gibson } else if (strcmp(value, "enabled") == 0) { 314230f4b05bSDavid Gibson spapr->resize_hpt = SPAPR_RESIZE_HPT_ENABLED; 314330f4b05bSDavid Gibson } else if (strcmp(value, "required") == 0) { 314430f4b05bSDavid Gibson spapr->resize_hpt = SPAPR_RESIZE_HPT_REQUIRED; 314530f4b05bSDavid Gibson } else { 314630f4b05bSDavid Gibson error_setg(errp, "Bad value for \"resize-hpt\" property"); 314730f4b05bSDavid Gibson } 314830f4b05bSDavid Gibson } 314930f4b05bSDavid Gibson 3150fa98fbfcSSam Bobroff static void spapr_get_vsmt(Object *obj, Visitor *v, const char *name, 3151fa98fbfcSSam Bobroff void *opaque, Error **errp) 3152fa98fbfcSSam Bobroff { 3153fa98fbfcSSam Bobroff visit_type_uint32(v, name, (uint32_t *)opaque, errp); 3154fa98fbfcSSam Bobroff } 3155fa98fbfcSSam Bobroff 3156fa98fbfcSSam Bobroff static void spapr_set_vsmt(Object *obj, Visitor *v, const char *name, 3157fa98fbfcSSam Bobroff void *opaque, Error **errp) 3158fa98fbfcSSam Bobroff { 3159fa98fbfcSSam Bobroff visit_type_uint32(v, name, (uint32_t *)opaque, errp); 3160fa98fbfcSSam Bobroff } 3161fa98fbfcSSam Bobroff 31623ba3d0bcSCédric Le Goater static char *spapr_get_ic_mode(Object *obj, Error **errp) 31633ba3d0bcSCédric Le Goater { 3164ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(obj); 31653ba3d0bcSCédric Le Goater 31663ba3d0bcSCédric Le Goater if 
(spapr->irq == &spapr_irq_xics_legacy) { 31673ba3d0bcSCédric Le Goater return g_strdup("legacy"); 31683ba3d0bcSCédric Le Goater } else if (spapr->irq == &spapr_irq_xics) { 31693ba3d0bcSCédric Le Goater return g_strdup("xics"); 31703ba3d0bcSCédric Le Goater } else if (spapr->irq == &spapr_irq_xive) { 31713ba3d0bcSCédric Le Goater return g_strdup("xive"); 317213db0cd9SCédric Le Goater } else if (spapr->irq == &spapr_irq_dual) { 317313db0cd9SCédric Le Goater return g_strdup("dual"); 31743ba3d0bcSCédric Le Goater } 31753ba3d0bcSCédric Le Goater g_assert_not_reached(); 31763ba3d0bcSCédric Le Goater } 31773ba3d0bcSCédric Le Goater 31783ba3d0bcSCédric Le Goater static void spapr_set_ic_mode(Object *obj, const char *value, Error **errp) 31793ba3d0bcSCédric Le Goater { 3180ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(obj); 31813ba3d0bcSCédric Le Goater 318221df5e4fSGreg Kurz if (SPAPR_MACHINE_GET_CLASS(spapr)->legacy_irq_allocation) { 318321df5e4fSGreg Kurz error_setg(errp, "This machine only uses the legacy XICS backend, don't pass ic-mode"); 318421df5e4fSGreg Kurz return; 318521df5e4fSGreg Kurz } 318621df5e4fSGreg Kurz 31873ba3d0bcSCédric Le Goater /* The legacy IRQ backend can not be set */ 31883ba3d0bcSCédric Le Goater if (strcmp(value, "xics") == 0) { 31893ba3d0bcSCédric Le Goater spapr->irq = &spapr_irq_xics; 31903ba3d0bcSCédric Le Goater } else if (strcmp(value, "xive") == 0) { 31913ba3d0bcSCédric Le Goater spapr->irq = &spapr_irq_xive; 319213db0cd9SCédric Le Goater } else if (strcmp(value, "dual") == 0) { 319313db0cd9SCédric Le Goater spapr->irq = &spapr_irq_dual; 31943ba3d0bcSCédric Le Goater } else { 31953ba3d0bcSCédric Le Goater error_setg(errp, "Bad value for \"ic-mode\" property"); 31963ba3d0bcSCédric Le Goater } 31973ba3d0bcSCédric Le Goater } 31983ba3d0bcSCédric Le Goater 319927461d69SPrasad J Pandit static char *spapr_get_host_model(Object *obj, Error **errp) 320027461d69SPrasad J Pandit { 3201ce2918cbSDavid Gibson SpaprMachineState *spapr 
= SPAPR_MACHINE(obj); 320227461d69SPrasad J Pandit 320327461d69SPrasad J Pandit return g_strdup(spapr->host_model); 320427461d69SPrasad J Pandit } 320527461d69SPrasad J Pandit 320627461d69SPrasad J Pandit static void spapr_set_host_model(Object *obj, const char *value, Error **errp) 320727461d69SPrasad J Pandit { 3208ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(obj); 320927461d69SPrasad J Pandit 321027461d69SPrasad J Pandit g_free(spapr->host_model); 321127461d69SPrasad J Pandit spapr->host_model = g_strdup(value); 321227461d69SPrasad J Pandit } 321327461d69SPrasad J Pandit 321427461d69SPrasad J Pandit static char *spapr_get_host_serial(Object *obj, Error **errp) 321527461d69SPrasad J Pandit { 3216ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(obj); 321727461d69SPrasad J Pandit 321827461d69SPrasad J Pandit return g_strdup(spapr->host_serial); 321927461d69SPrasad J Pandit } 322027461d69SPrasad J Pandit 322127461d69SPrasad J Pandit static void spapr_set_host_serial(Object *obj, const char *value, Error **errp) 322227461d69SPrasad J Pandit { 3223ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(obj); 322427461d69SPrasad J Pandit 322527461d69SPrasad J Pandit g_free(spapr->host_serial); 322627461d69SPrasad J Pandit spapr->host_serial = g_strdup(value); 322727461d69SPrasad J Pandit } 322827461d69SPrasad J Pandit 3229bcb5ce08SDavid Gibson static void spapr_instance_init(Object *obj) 323023825581SEduardo Habkost { 3231ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(obj); 3232ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); 3233715c5407SDavid Gibson 3234715c5407SDavid Gibson spapr->htab_fd = -1; 3235f6229214SMichael Roth spapr->use_hotplug_event_source = true; 323623825581SEduardo Habkost object_property_add_str(obj, "kvm-type", 323723825581SEduardo Habkost spapr_get_kvm_type, spapr_set_kvm_type, NULL); 323849d2e648SMarcel Apfelbaum object_property_set_description(obj, "kvm-type", 
323949d2e648SMarcel Apfelbaum "Specifies the KVM virtualization mode (HV, PR)", 324049d2e648SMarcel Apfelbaum NULL); 3241f6229214SMichael Roth object_property_add_bool(obj, "modern-hotplug-events", 3242f6229214SMichael Roth spapr_get_modern_hotplug_events, 3243f6229214SMichael Roth spapr_set_modern_hotplug_events, 3244f6229214SMichael Roth NULL); 3245f6229214SMichael Roth object_property_set_description(obj, "modern-hotplug-events", 3246f6229214SMichael Roth "Use dedicated hotplug event mechanism in" 3247f6229214SMichael Roth " place of standard EPOW events when possible" 3248f6229214SMichael Roth " (required for memory hot-unplug support)", 3249f6229214SMichael Roth NULL); 32507843c0d6SDavid Gibson ppc_compat_add_property(obj, "max-cpu-compat", &spapr->max_compat_pvr, 32517843c0d6SDavid Gibson "Maximum permitted CPU compatibility mode", 32527843c0d6SDavid Gibson &error_fatal); 325330f4b05bSDavid Gibson 325430f4b05bSDavid Gibson object_property_add_str(obj, "resize-hpt", 325530f4b05bSDavid Gibson spapr_get_resize_hpt, spapr_set_resize_hpt, NULL); 325630f4b05bSDavid Gibson object_property_set_description(obj, "resize-hpt", 325730f4b05bSDavid Gibson "Resizing of the Hash Page Table (enabled, disabled, required)", 325830f4b05bSDavid Gibson NULL); 3259fa98fbfcSSam Bobroff object_property_add(obj, "vsmt", "uint32", spapr_get_vsmt, 3260fa98fbfcSSam Bobroff spapr_set_vsmt, NULL, &spapr->vsmt, &error_abort); 3261fa98fbfcSSam Bobroff object_property_set_description(obj, "vsmt", 3262fa98fbfcSSam Bobroff "Virtual SMT: KVM behaves as if this were" 3263fa98fbfcSSam Bobroff " the host's SMT mode", &error_abort); 3264fcad0d21SAlexey Kardashevskiy object_property_add_bool(obj, "vfio-no-msix-emulation", 3265fcad0d21SAlexey Kardashevskiy spapr_get_msix_emulation, NULL, NULL); 32663ba3d0bcSCédric Le Goater 32673ba3d0bcSCédric Le Goater /* The machine class defines the default interrupt controller mode */ 32683ba3d0bcSCédric Le Goater spapr->irq = smc->irq; 32693ba3d0bcSCédric Le 
Goater object_property_add_str(obj, "ic-mode", spapr_get_ic_mode, 32703ba3d0bcSCédric Le Goater spapr_set_ic_mode, NULL); 32713ba3d0bcSCédric Le Goater object_property_set_description(obj, "ic-mode", 327213db0cd9SCédric Le Goater "Specifies the interrupt controller mode (xics, xive, dual)", 32733ba3d0bcSCédric Le Goater NULL); 327427461d69SPrasad J Pandit 327527461d69SPrasad J Pandit object_property_add_str(obj, "host-model", 327627461d69SPrasad J Pandit spapr_get_host_model, spapr_set_host_model, 327727461d69SPrasad J Pandit &error_abort); 327827461d69SPrasad J Pandit object_property_set_description(obj, "host-model", 32790a794529SDavid Gibson "Host model to advertise in guest device tree", &error_abort); 328027461d69SPrasad J Pandit object_property_add_str(obj, "host-serial", 328127461d69SPrasad J Pandit spapr_get_host_serial, spapr_set_host_serial, 328227461d69SPrasad J Pandit &error_abort); 328327461d69SPrasad J Pandit object_property_set_description(obj, "host-serial", 32840a794529SDavid Gibson "Host serial number to advertise in guest device tree", &error_abort); 328523825581SEduardo Habkost } 328623825581SEduardo Habkost 328787bbdd9cSDavid Gibson static void spapr_machine_finalizefn(Object *obj) 328887bbdd9cSDavid Gibson { 3289ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(obj); 329087bbdd9cSDavid Gibson 329187bbdd9cSDavid Gibson g_free(spapr->kvm_type); 329287bbdd9cSDavid Gibson } 329387bbdd9cSDavid Gibson 32941c7ad77eSNicholas Piggin void spapr_do_system_reset_on_cpu(CPUState *cs, run_on_cpu_data arg) 329534316482SAlexey Kardashevskiy { 329634316482SAlexey Kardashevskiy cpu_synchronize_state(cs); 329734316482SAlexey Kardashevskiy ppc_cpu_do_system_reset(cs); 329834316482SAlexey Kardashevskiy } 329934316482SAlexey Kardashevskiy 330034316482SAlexey Kardashevskiy static void spapr_nmi(NMIState *n, int cpu_index, Error **errp) 330134316482SAlexey Kardashevskiy { 330234316482SAlexey Kardashevskiy CPUState *cs; 330334316482SAlexey Kardashevskiy 
330434316482SAlexey Kardashevskiy CPU_FOREACH(cs) { 33051c7ad77eSNicholas Piggin async_run_on_cpu(cs, spapr_do_system_reset_on_cpu, RUN_ON_CPU_NULL); 330634316482SAlexey Kardashevskiy } 330734316482SAlexey Kardashevskiy } 330834316482SAlexey Kardashevskiy 3309ce2918cbSDavid Gibson int spapr_lmb_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr, 331062d38c9bSGreg Kurz void *fdt, int *fdt_start_offset, Error **errp) 331162d38c9bSGreg Kurz { 331262d38c9bSGreg Kurz uint64_t addr; 331362d38c9bSGreg Kurz uint32_t node; 331462d38c9bSGreg Kurz 331562d38c9bSGreg Kurz addr = spapr_drc_index(drc) * SPAPR_MEMORY_BLOCK_SIZE; 331662d38c9bSGreg Kurz node = object_property_get_uint(OBJECT(drc->dev), PC_DIMM_NODE_PROP, 331762d38c9bSGreg Kurz &error_abort); 331862d38c9bSGreg Kurz *fdt_start_offset = spapr_populate_memory_node(fdt, node, addr, 331962d38c9bSGreg Kurz SPAPR_MEMORY_BLOCK_SIZE); 332062d38c9bSGreg Kurz return 0; 332162d38c9bSGreg Kurz } 332262d38c9bSGreg Kurz 332379b78a6bSMichael Roth static void spapr_add_lmbs(DeviceState *dev, uint64_t addr_start, uint64_t size, 332462d38c9bSGreg Kurz bool dedicated_hp_event_source, Error **errp) 3325c20d332aSBharata B Rao { 3326ce2918cbSDavid Gibson SpaprDrc *drc; 3327c20d332aSBharata B Rao uint32_t nr_lmbs = size/SPAPR_MEMORY_BLOCK_SIZE; 332862d38c9bSGreg Kurz int i; 332979b78a6bSMichael Roth uint64_t addr = addr_start; 333094fd9cbaSLaurent Vivier bool hotplugged = spapr_drc_hotplugged(dev); 3331160bb678SGreg Kurz Error *local_err = NULL; 3332c20d332aSBharata B Rao 3333c20d332aSBharata B Rao for (i = 0; i < nr_lmbs; i++) { 3334fbf55397SDavid Gibson drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, 3335c20d332aSBharata B Rao addr / SPAPR_MEMORY_BLOCK_SIZE); 3336c20d332aSBharata B Rao g_assert(drc); 3337c20d332aSBharata B Rao 333809d876ceSGreg Kurz spapr_drc_attach(drc, dev, &local_err); 3339160bb678SGreg Kurz if (local_err) { 3340160bb678SGreg Kurz while (addr > addr_start) { 3341160bb678SGreg Kurz addr -= SPAPR_MEMORY_BLOCK_SIZE; 
3342160bb678SGreg Kurz drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, 3343160bb678SGreg Kurz addr / SPAPR_MEMORY_BLOCK_SIZE); 3344a8dc47fdSDavid Gibson spapr_drc_detach(drc); 3345160bb678SGreg Kurz } 3346160bb678SGreg Kurz error_propagate(errp, local_err); 3347160bb678SGreg Kurz return; 3348160bb678SGreg Kurz } 334994fd9cbaSLaurent Vivier if (!hotplugged) { 335094fd9cbaSLaurent Vivier spapr_drc_reset(drc); 335194fd9cbaSLaurent Vivier } 3352c20d332aSBharata B Rao addr += SPAPR_MEMORY_BLOCK_SIZE; 3353c20d332aSBharata B Rao } 33545dd5238cSJianjun Duan /* send hotplug notification to the 33555dd5238cSJianjun Duan * guest only in case of hotplugged memory 33565dd5238cSJianjun Duan */ 335794fd9cbaSLaurent Vivier if (hotplugged) { 335879b78a6bSMichael Roth if (dedicated_hp_event_source) { 3359fbf55397SDavid Gibson drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, 336079b78a6bSMichael Roth addr_start / SPAPR_MEMORY_BLOCK_SIZE); 336179b78a6bSMichael Roth spapr_hotplug_req_add_by_count_indexed(SPAPR_DR_CONNECTOR_TYPE_LMB, 336279b78a6bSMichael Roth nr_lmbs, 33630b55aa91SDavid Gibson spapr_drc_index(drc)); 336479b78a6bSMichael Roth } else { 336579b78a6bSMichael Roth spapr_hotplug_req_add_by_count(SPAPR_DR_CONNECTOR_TYPE_LMB, 336679b78a6bSMichael Roth nr_lmbs); 336779b78a6bSMichael Roth } 3368c20d332aSBharata B Rao } 33695dd5238cSJianjun Duan } 3370c20d332aSBharata B Rao 3371c20d332aSBharata B Rao static void spapr_memory_plug(HotplugHandler *hotplug_dev, DeviceState *dev, 337281985f3bSDavid Hildenbrand Error **errp) 3373c20d332aSBharata B Rao { 3374c20d332aSBharata B Rao Error *local_err = NULL; 3375ce2918cbSDavid Gibson SpaprMachineState *ms = SPAPR_MACHINE(hotplug_dev); 3376c20d332aSBharata B Rao PCDIMMDevice *dimm = PC_DIMM(dev); 3377b0e62443SDavid Hildenbrand uint64_t size, addr; 337804790978SThomas Huth 3379946d6154SDavid Hildenbrand size = memory_device_get_region_size(MEMORY_DEVICE(dev), &error_abort); 3380df587133SThomas Huth 3381fd3416f5SDavid Hildenbrand pc_dimm_plug(dimm, 
MACHINE(ms), &local_err); 3382c20d332aSBharata B Rao if (local_err) { 3383c20d332aSBharata B Rao goto out; 3384c20d332aSBharata B Rao } 3385c20d332aSBharata B Rao 33869ed442b8SMarc-André Lureau addr = object_property_get_uint(OBJECT(dimm), 33879ed442b8SMarc-André Lureau PC_DIMM_ADDR_PROP, &local_err); 3388c20d332aSBharata B Rao if (local_err) { 3389160bb678SGreg Kurz goto out_unplug; 3390c20d332aSBharata B Rao } 3391c20d332aSBharata B Rao 339262d38c9bSGreg Kurz spapr_add_lmbs(dev, addr, size, spapr_ovec_test(ms->ov5_cas, OV5_HP_EVT), 3393160bb678SGreg Kurz &local_err); 3394160bb678SGreg Kurz if (local_err) { 3395160bb678SGreg Kurz goto out_unplug; 3396160bb678SGreg Kurz } 3397c20d332aSBharata B Rao 3398160bb678SGreg Kurz return; 3399160bb678SGreg Kurz 3400160bb678SGreg Kurz out_unplug: 3401fd3416f5SDavid Hildenbrand pc_dimm_unplug(dimm, MACHINE(ms)); 3402c20d332aSBharata B Rao out: 3403c20d332aSBharata B Rao error_propagate(errp, local_err); 3404c20d332aSBharata B Rao } 3405c20d332aSBharata B Rao 3406c871bc70SLaurent Vivier static void spapr_memory_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev, 3407c871bc70SLaurent Vivier Error **errp) 3408c871bc70SLaurent Vivier { 3409ce2918cbSDavid Gibson const SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(hotplug_dev); 3410ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_dev); 3411c871bc70SLaurent Vivier PCDIMMDevice *dimm = PC_DIMM(dev); 34128f1ffe5bSDavid Hildenbrand Error *local_err = NULL; 341304790978SThomas Huth uint64_t size; 3414123eec65SDavid Gibson Object *memdev; 3415123eec65SDavid Gibson hwaddr pagesize; 3416c871bc70SLaurent Vivier 34174e8a01bdSDavid Hildenbrand if (!smc->dr_lmb_enabled) { 34184e8a01bdSDavid Hildenbrand error_setg(errp, "Memory hotplug not supported for this machine"); 34194e8a01bdSDavid Hildenbrand return; 34204e8a01bdSDavid Hildenbrand } 34214e8a01bdSDavid Hildenbrand 3422946d6154SDavid Hildenbrand size = memory_device_get_region_size(MEMORY_DEVICE(dimm), 
&local_err); 3423946d6154SDavid Hildenbrand if (local_err) { 3424946d6154SDavid Hildenbrand error_propagate(errp, local_err); 342504790978SThomas Huth return; 342604790978SThomas Huth } 342704790978SThomas Huth 3428c871bc70SLaurent Vivier if (size % SPAPR_MEMORY_BLOCK_SIZE) { 3429c871bc70SLaurent Vivier error_setg(errp, "Hotplugged memory size must be a multiple of " 3430ab3dd749SPhilippe Mathieu-Daudé "%" PRIu64 " MB", SPAPR_MEMORY_BLOCK_SIZE / MiB); 3431c871bc70SLaurent Vivier return; 3432c871bc70SLaurent Vivier } 3433c871bc70SLaurent Vivier 3434123eec65SDavid Gibson memdev = object_property_get_link(OBJECT(dimm), PC_DIMM_MEMDEV_PROP, 3435123eec65SDavid Gibson &error_abort); 3436123eec65SDavid Gibson pagesize = host_memory_backend_pagesize(MEMORY_BACKEND(memdev)); 34378f1ffe5bSDavid Hildenbrand spapr_check_pagesize(spapr, pagesize, &local_err); 34388f1ffe5bSDavid Hildenbrand if (local_err) { 34398f1ffe5bSDavid Hildenbrand error_propagate(errp, local_err); 34408f1ffe5bSDavid Hildenbrand return; 34418f1ffe5bSDavid Hildenbrand } 34428f1ffe5bSDavid Hildenbrand 3443fd3416f5SDavid Hildenbrand pc_dimm_pre_plug(dimm, MACHINE(hotplug_dev), NULL, errp); 3444c871bc70SLaurent Vivier } 3445c871bc70SLaurent Vivier 3446ce2918cbSDavid Gibson struct SpaprDimmState { 34470cffce56SDavid Gibson PCDIMMDevice *dimm; 3448cf632463SBharata B Rao uint32_t nr_lmbs; 3449ce2918cbSDavid Gibson QTAILQ_ENTRY(SpaprDimmState) next; 34500cffce56SDavid Gibson }; 34510cffce56SDavid Gibson 3452ce2918cbSDavid Gibson static SpaprDimmState *spapr_pending_dimm_unplugs_find(SpaprMachineState *s, 34530cffce56SDavid Gibson PCDIMMDevice *dimm) 34540cffce56SDavid Gibson { 3455ce2918cbSDavid Gibson SpaprDimmState *dimm_state = NULL; 34560cffce56SDavid Gibson 34570cffce56SDavid Gibson QTAILQ_FOREACH(dimm_state, &s->pending_dimm_unplugs, next) { 34580cffce56SDavid Gibson if (dimm_state->dimm == dimm) { 34590cffce56SDavid Gibson break; 34600cffce56SDavid Gibson } 34610cffce56SDavid Gibson } 34620cffce56SDavid 
Gibson return dimm_state; 34630cffce56SDavid Gibson } 34640cffce56SDavid Gibson 3465ce2918cbSDavid Gibson static SpaprDimmState *spapr_pending_dimm_unplugs_add(SpaprMachineState *spapr, 34668d5981c4SBharata B Rao uint32_t nr_lmbs, 34678d5981c4SBharata B Rao PCDIMMDevice *dimm) 34680cffce56SDavid Gibson { 3469ce2918cbSDavid Gibson SpaprDimmState *ds = NULL; 34708d5981c4SBharata B Rao 34718d5981c4SBharata B Rao /* 34728d5981c4SBharata B Rao * If this request is for a DIMM whose removal had failed earlier 34738d5981c4SBharata B Rao * (due to guest's refusal to remove the LMBs), we would have this 34748d5981c4SBharata B Rao * dimm already in the pending_dimm_unplugs list. In that 34758d5981c4SBharata B Rao * case don't add again. 34768d5981c4SBharata B Rao */ 34778d5981c4SBharata B Rao ds = spapr_pending_dimm_unplugs_find(spapr, dimm); 34788d5981c4SBharata B Rao if (!ds) { 3479ce2918cbSDavid Gibson ds = g_malloc0(sizeof(SpaprDimmState)); 34808d5981c4SBharata B Rao ds->nr_lmbs = nr_lmbs; 34818d5981c4SBharata B Rao ds->dimm = dimm; 34828d5981c4SBharata B Rao QTAILQ_INSERT_HEAD(&spapr->pending_dimm_unplugs, ds, next); 34838d5981c4SBharata B Rao } 34848d5981c4SBharata B Rao return ds; 34850cffce56SDavid Gibson } 34860cffce56SDavid Gibson 3487ce2918cbSDavid Gibson static void spapr_pending_dimm_unplugs_remove(SpaprMachineState *spapr, 3488ce2918cbSDavid Gibson SpaprDimmState *dimm_state) 34890cffce56SDavid Gibson { 34900cffce56SDavid Gibson QTAILQ_REMOVE(&spapr->pending_dimm_unplugs, dimm_state, next); 34910cffce56SDavid Gibson g_free(dimm_state); 34920cffce56SDavid Gibson } 3493cf632463SBharata B Rao 3494ce2918cbSDavid Gibson static SpaprDimmState *spapr_recover_pending_dimm_state(SpaprMachineState *ms, 349516ee9980SDaniel Henrique Barboza PCDIMMDevice *dimm) 349616ee9980SDaniel Henrique Barboza { 3497ce2918cbSDavid Gibson SpaprDrc *drc; 3498946d6154SDavid Hildenbrand uint64_t size = memory_device_get_region_size(MEMORY_DEVICE(dimm), 3499946d6154SDavid Hildenbrand 
&error_abort); 350016ee9980SDaniel Henrique Barboza uint32_t nr_lmbs = size / SPAPR_MEMORY_BLOCK_SIZE; 350116ee9980SDaniel Henrique Barboza uint32_t avail_lmbs = 0; 350216ee9980SDaniel Henrique Barboza uint64_t addr_start, addr; 350316ee9980SDaniel Henrique Barboza int i; 350416ee9980SDaniel Henrique Barboza 350516ee9980SDaniel Henrique Barboza addr_start = object_property_get_int(OBJECT(dimm), PC_DIMM_ADDR_PROP, 350616ee9980SDaniel Henrique Barboza &error_abort); 350716ee9980SDaniel Henrique Barboza 350816ee9980SDaniel Henrique Barboza addr = addr_start; 350916ee9980SDaniel Henrique Barboza for (i = 0; i < nr_lmbs; i++) { 3510fbf55397SDavid Gibson drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, 351116ee9980SDaniel Henrique Barboza addr / SPAPR_MEMORY_BLOCK_SIZE); 351216ee9980SDaniel Henrique Barboza g_assert(drc); 3513454b580aSDavid Gibson if (drc->dev) { 351416ee9980SDaniel Henrique Barboza avail_lmbs++; 351516ee9980SDaniel Henrique Barboza } 351616ee9980SDaniel Henrique Barboza addr += SPAPR_MEMORY_BLOCK_SIZE; 351716ee9980SDaniel Henrique Barboza } 351816ee9980SDaniel Henrique Barboza 35198d5981c4SBharata B Rao return spapr_pending_dimm_unplugs_add(ms, avail_lmbs, dimm); 352016ee9980SDaniel Henrique Barboza } 352116ee9980SDaniel Henrique Barboza 352231834723SDaniel Henrique Barboza /* Callback to be called during DRC release. */ 352331834723SDaniel Henrique Barboza void spapr_lmb_release(DeviceState *dev) 3524cf632463SBharata B Rao { 35253ec71474SDavid Hildenbrand HotplugHandler *hotplug_ctrl = qdev_get_hotplug_handler(dev); 3526ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_ctrl); 3527ce2918cbSDavid Gibson SpaprDimmState *ds = spapr_pending_dimm_unplugs_find(spapr, PC_DIMM(dev)); 3528cf632463SBharata B Rao 352916ee9980SDaniel Henrique Barboza /* This information will get lost if a migration occurs 353016ee9980SDaniel Henrique Barboza * during the unplug process. In this case recover it. 
*/ 353116ee9980SDaniel Henrique Barboza if (ds == NULL) { 353216ee9980SDaniel Henrique Barboza ds = spapr_recover_pending_dimm_state(spapr, PC_DIMM(dev)); 35338d5981c4SBharata B Rao g_assert(ds); 3534454b580aSDavid Gibson /* The DRC being examined by the caller at least must be counted */ 3535454b580aSDavid Gibson g_assert(ds->nr_lmbs); 353616ee9980SDaniel Henrique Barboza } 3537454b580aSDavid Gibson 3538454b580aSDavid Gibson if (--ds->nr_lmbs) { 3539cf632463SBharata B Rao return; 3540cf632463SBharata B Rao } 3541cf632463SBharata B Rao 3542cf632463SBharata B Rao /* 3543cf632463SBharata B Rao * Now that all the LMBs have been removed by the guest, call the 35443ec71474SDavid Hildenbrand * unplug handler chain. This can never fail. 3545cf632463SBharata B Rao */ 35463ec71474SDavid Hildenbrand hotplug_handler_unplug(hotplug_ctrl, dev, &error_abort); 354707578b0aSDavid Hildenbrand object_unparent(OBJECT(dev)); 35483ec71474SDavid Hildenbrand } 35493ec71474SDavid Hildenbrand 35503ec71474SDavid Hildenbrand static void spapr_memory_unplug(HotplugHandler *hotplug_dev, DeviceState *dev) 35513ec71474SDavid Hildenbrand { 3552ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_dev); 3553ce2918cbSDavid Gibson SpaprDimmState *ds = spapr_pending_dimm_unplugs_find(spapr, PC_DIMM(dev)); 35543ec71474SDavid Hildenbrand 3555fd3416f5SDavid Hildenbrand pc_dimm_unplug(PC_DIMM(dev), MACHINE(hotplug_dev)); 355607578b0aSDavid Hildenbrand object_property_set_bool(OBJECT(dev), false, "realized", NULL); 35572a129767SDaniel Henrique Barboza spapr_pending_dimm_unplugs_remove(spapr, ds); 3558cf632463SBharata B Rao } 3559cf632463SBharata B Rao 3560cf632463SBharata B Rao static void spapr_memory_unplug_request(HotplugHandler *hotplug_dev, 3561cf632463SBharata B Rao DeviceState *dev, Error **errp) 3562cf632463SBharata B Rao { 3563ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_dev); 3564cf632463SBharata B Rao Error *local_err = NULL; 3565cf632463SBharata B Rao 
PCDIMMDevice *dimm = PC_DIMM(dev); 356604790978SThomas Huth uint32_t nr_lmbs; 356704790978SThomas Huth uint64_t size, addr_start, addr; 35680cffce56SDavid Gibson int i; 3569ce2918cbSDavid Gibson SpaprDrc *drc; 357004790978SThomas Huth 3571946d6154SDavid Hildenbrand size = memory_device_get_region_size(MEMORY_DEVICE(dimm), &error_abort); 357204790978SThomas Huth nr_lmbs = size / SPAPR_MEMORY_BLOCK_SIZE; 357304790978SThomas Huth 35749ed442b8SMarc-André Lureau addr_start = object_property_get_uint(OBJECT(dimm), PC_DIMM_ADDR_PROP, 35750cffce56SDavid Gibson &local_err); 3576cf632463SBharata B Rao if (local_err) { 3577cf632463SBharata B Rao goto out; 3578cf632463SBharata B Rao } 3579cf632463SBharata B Rao 35802a129767SDaniel Henrique Barboza /* 35812a129767SDaniel Henrique Barboza * An existing pending dimm state for this DIMM means that there is an 35822a129767SDaniel Henrique Barboza * unplug operation in progress, waiting for the spapr_lmb_release 35832a129767SDaniel Henrique Barboza * callback to complete the job (BQL can't cover that far). In this case, 35842a129767SDaniel Henrique Barboza * bail out to avoid detaching DRCs that were already released. 
35852a129767SDaniel Henrique Barboza */ 35862a129767SDaniel Henrique Barboza if (spapr_pending_dimm_unplugs_find(spapr, dimm)) { 35872a129767SDaniel Henrique Barboza error_setg(&local_err, 35882a129767SDaniel Henrique Barboza "Memory unplug already in progress for device %s", 35892a129767SDaniel Henrique Barboza dev->id); 35902a129767SDaniel Henrique Barboza goto out; 35912a129767SDaniel Henrique Barboza } 35922a129767SDaniel Henrique Barboza 35938d5981c4SBharata B Rao spapr_pending_dimm_unplugs_add(spapr, nr_lmbs, dimm); 35940cffce56SDavid Gibson 35950cffce56SDavid Gibson addr = addr_start; 35960cffce56SDavid Gibson for (i = 0; i < nr_lmbs; i++) { 3597fbf55397SDavid Gibson drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, 35980cffce56SDavid Gibson addr / SPAPR_MEMORY_BLOCK_SIZE); 35990cffce56SDavid Gibson g_assert(drc); 36000cffce56SDavid Gibson 3601a8dc47fdSDavid Gibson spapr_drc_detach(drc); 36020cffce56SDavid Gibson addr += SPAPR_MEMORY_BLOCK_SIZE; 36030cffce56SDavid Gibson } 36040cffce56SDavid Gibson 3605fbf55397SDavid Gibson drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, 36060cffce56SDavid Gibson addr_start / SPAPR_MEMORY_BLOCK_SIZE); 36070cffce56SDavid Gibson spapr_hotplug_req_remove_by_count_indexed(SPAPR_DR_CONNECTOR_TYPE_LMB, 36080b55aa91SDavid Gibson nr_lmbs, spapr_drc_index(drc)); 3609cf632463SBharata B Rao out: 3610cf632463SBharata B Rao error_propagate(errp, local_err); 3611cf632463SBharata B Rao } 3612cf632463SBharata B Rao 3613765d1bddSDavid Gibson /* Callback to be called during DRC release. */ 3614765d1bddSDavid Gibson void spapr_core_release(DeviceState *dev) 3615ff9006ddSIgor Mammedov { 3616a4261be1SDavid Hildenbrand HotplugHandler *hotplug_ctrl = qdev_get_hotplug_handler(dev); 3617a4261be1SDavid Hildenbrand 3618a4261be1SDavid Hildenbrand /* Call the unplug handler chain. This can never fail. 
*/ 3619a4261be1SDavid Hildenbrand hotplug_handler_unplug(hotplug_ctrl, dev, &error_abort); 362007578b0aSDavid Hildenbrand object_unparent(OBJECT(dev)); 3621a4261be1SDavid Hildenbrand } 3622a4261be1SDavid Hildenbrand 3623a4261be1SDavid Hildenbrand static void spapr_core_unplug(HotplugHandler *hotplug_dev, DeviceState *dev) 3624a4261be1SDavid Hildenbrand { 3625a4261be1SDavid Hildenbrand MachineState *ms = MACHINE(hotplug_dev); 3626ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(ms); 3627ff9006ddSIgor Mammedov CPUCore *cc = CPU_CORE(dev); 3628535455fdSIgor Mammedov CPUArchId *core_slot = spapr_find_cpu_slot(ms, cc->core_id, NULL); 3629ff9006ddSIgor Mammedov 363046f7afa3SGreg Kurz if (smc->pre_2_10_has_unused_icps) { 3631ce2918cbSDavid Gibson SpaprCpuCore *sc = SPAPR_CPU_CORE(OBJECT(dev)); 363246f7afa3SGreg Kurz int i; 363346f7afa3SGreg Kurz 363446f7afa3SGreg Kurz for (i = 0; i < cc->nr_threads; i++) { 363594ad93bdSGreg Kurz CPUState *cs = CPU(sc->threads[i]); 363646f7afa3SGreg Kurz 363746f7afa3SGreg Kurz pre_2_10_vmstate_register_dummy_icp(cs->cpu_index); 363846f7afa3SGreg Kurz } 363946f7afa3SGreg Kurz } 364046f7afa3SGreg Kurz 364107572c06SGreg Kurz assert(core_slot); 3642535455fdSIgor Mammedov core_slot->cpu = NULL; 364307578b0aSDavid Hildenbrand object_property_set_bool(OBJECT(dev), false, "realized", NULL); 3644ff9006ddSIgor Mammedov } 3645ff9006ddSIgor Mammedov 3646115debf2SIgor Mammedov static 3647115debf2SIgor Mammedov void spapr_core_unplug_request(HotplugHandler *hotplug_dev, DeviceState *dev, 3648ff9006ddSIgor Mammedov Error **errp) 3649ff9006ddSIgor Mammedov { 3650ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev)); 3651535455fdSIgor Mammedov int index; 3652ce2918cbSDavid Gibson SpaprDrc *drc; 3653535455fdSIgor Mammedov CPUCore *cc = CPU_CORE(dev); 3654ff9006ddSIgor Mammedov 3655535455fdSIgor Mammedov if (!spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index)) { 3656535455fdSIgor Mammedov 
error_setg(errp, "Unable to find CPU core with core-id: %d", 3657535455fdSIgor Mammedov cc->core_id); 3658535455fdSIgor Mammedov return; 3659535455fdSIgor Mammedov } 3660ff9006ddSIgor Mammedov if (index == 0) { 3661ff9006ddSIgor Mammedov error_setg(errp, "Boot CPU core may not be unplugged"); 3662ff9006ddSIgor Mammedov return; 3663ff9006ddSIgor Mammedov } 3664ff9006ddSIgor Mammedov 36655d0fb150SGreg Kurz drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU, 36665d0fb150SGreg Kurz spapr_vcpu_id(spapr, cc->core_id)); 3667ff9006ddSIgor Mammedov g_assert(drc); 3668ff9006ddSIgor Mammedov 366947c8c915SGreg Kurz if (!spapr_drc_unplug_requested(drc)) { 3670a8dc47fdSDavid Gibson spapr_drc_detach(drc); 3671ff9006ddSIgor Mammedov spapr_hotplug_req_remove_by_index(drc); 3672ff9006ddSIgor Mammedov } 367347c8c915SGreg Kurz } 3674ff9006ddSIgor Mammedov 3675ce2918cbSDavid Gibson int spapr_core_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr, 3676345b12b9SGreg Kurz void *fdt, int *fdt_start_offset, Error **errp) 3677345b12b9SGreg Kurz { 3678ce2918cbSDavid Gibson SpaprCpuCore *core = SPAPR_CPU_CORE(drc->dev); 3679345b12b9SGreg Kurz CPUState *cs = CPU(core->threads[0]); 3680345b12b9SGreg Kurz PowerPCCPU *cpu = POWERPC_CPU(cs); 3681345b12b9SGreg Kurz DeviceClass *dc = DEVICE_GET_CLASS(cs); 3682345b12b9SGreg Kurz int id = spapr_get_vcpu_id(cpu); 3683345b12b9SGreg Kurz char *nodename; 3684345b12b9SGreg Kurz int offset; 3685345b12b9SGreg Kurz 3686345b12b9SGreg Kurz nodename = g_strdup_printf("%s@%x", dc->fw_name, id); 3687345b12b9SGreg Kurz offset = fdt_add_subnode(fdt, 0, nodename); 3688345b12b9SGreg Kurz g_free(nodename); 3689345b12b9SGreg Kurz 3690345b12b9SGreg Kurz spapr_populate_cpu_dt(cs, fdt, offset, spapr); 3691345b12b9SGreg Kurz 3692345b12b9SGreg Kurz *fdt_start_offset = offset; 3693345b12b9SGreg Kurz return 0; 3694345b12b9SGreg Kurz } 3695345b12b9SGreg Kurz 3696ff9006ddSIgor Mammedov static void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev, 3697ff9006ddSIgor Mammedov 
Error **errp) 3698ff9006ddSIgor Mammedov { 3699ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev)); 3700ff9006ddSIgor Mammedov MachineClass *mc = MACHINE_GET_CLASS(spapr); 3701ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 3702ce2918cbSDavid Gibson SpaprCpuCore *core = SPAPR_CPU_CORE(OBJECT(dev)); 3703ff9006ddSIgor Mammedov CPUCore *cc = CPU_CORE(dev); 3704345b12b9SGreg Kurz CPUState *cs; 3705ce2918cbSDavid Gibson SpaprDrc *drc; 3706ff9006ddSIgor Mammedov Error *local_err = NULL; 3707535455fdSIgor Mammedov CPUArchId *core_slot; 3708535455fdSIgor Mammedov int index; 370994fd9cbaSLaurent Vivier bool hotplugged = spapr_drc_hotplugged(dev); 3710b1e81567SGreg Kurz int i; 3711ff9006ddSIgor Mammedov 3712535455fdSIgor Mammedov core_slot = spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index); 3713535455fdSIgor Mammedov if (!core_slot) { 3714535455fdSIgor Mammedov error_setg(errp, "Unable to find CPU core with core-id: %d", 3715535455fdSIgor Mammedov cc->core_id); 3716535455fdSIgor Mammedov return; 3717535455fdSIgor Mammedov } 37185d0fb150SGreg Kurz drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU, 37195d0fb150SGreg Kurz spapr_vcpu_id(spapr, cc->core_id)); 3720ff9006ddSIgor Mammedov 3721c5514d0eSIgor Mammedov g_assert(drc || !mc->has_hotpluggable_cpus); 3722ff9006ddSIgor Mammedov 3723e49c63d5SGreg Kurz if (drc) { 372409d876ceSGreg Kurz spapr_drc_attach(drc, dev, &local_err); 3725ff9006ddSIgor Mammedov if (local_err) { 3726ff9006ddSIgor Mammedov error_propagate(errp, local_err); 3727ff9006ddSIgor Mammedov return; 3728ff9006ddSIgor Mammedov } 3729ff9006ddSIgor Mammedov 373094fd9cbaSLaurent Vivier if (hotplugged) { 3731ff9006ddSIgor Mammedov /* 373294fd9cbaSLaurent Vivier * Send hotplug notification interrupt to the guest only 373394fd9cbaSLaurent Vivier * in case of hotplugged CPUs. 
3734ff9006ddSIgor Mammedov */ 3735ff9006ddSIgor Mammedov spapr_hotplug_req_add_by_index(drc); 373694fd9cbaSLaurent Vivier } else { 373794fd9cbaSLaurent Vivier spapr_drc_reset(drc); 3738ff9006ddSIgor Mammedov } 373994fd9cbaSLaurent Vivier } 374094fd9cbaSLaurent Vivier 3741535455fdSIgor Mammedov core_slot->cpu = OBJECT(dev); 374246f7afa3SGreg Kurz 374346f7afa3SGreg Kurz if (smc->pre_2_10_has_unused_icps) { 374446f7afa3SGreg Kurz for (i = 0; i < cc->nr_threads; i++) { 3745bc877283SGreg Kurz cs = CPU(core->threads[i]); 374646f7afa3SGreg Kurz pre_2_10_vmstate_unregister_dummy_icp(cs->cpu_index); 374746f7afa3SGreg Kurz } 374846f7afa3SGreg Kurz } 3749b1e81567SGreg Kurz 3750b1e81567SGreg Kurz /* 3751b1e81567SGreg Kurz * Set compatibility mode to match the boot CPU, which was either set 3752b1e81567SGreg Kurz * by the machine reset code or by CAS. 3753b1e81567SGreg Kurz */ 3754b1e81567SGreg Kurz if (hotplugged) { 3755b1e81567SGreg Kurz for (i = 0; i < cc->nr_threads; i++) { 3756b1e81567SGreg Kurz ppc_set_compat(core->threads[i], POWERPC_CPU(first_cpu)->compat_pvr, 3757b1e81567SGreg Kurz &local_err); 3758b1e81567SGreg Kurz if (local_err) { 3759b1e81567SGreg Kurz error_propagate(errp, local_err); 3760b1e81567SGreg Kurz return; 3761b1e81567SGreg Kurz } 3762b1e81567SGreg Kurz } 3763b1e81567SGreg Kurz } 3764ff9006ddSIgor Mammedov } 3765ff9006ddSIgor Mammedov 3766ff9006ddSIgor Mammedov static void spapr_core_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev, 3767ff9006ddSIgor Mammedov Error **errp) 3768ff9006ddSIgor Mammedov { 3769ff9006ddSIgor Mammedov MachineState *machine = MACHINE(OBJECT(hotplug_dev)); 3770ff9006ddSIgor Mammedov MachineClass *mc = MACHINE_GET_CLASS(hotplug_dev); 3771ff9006ddSIgor Mammedov Error *local_err = NULL; 3772ff9006ddSIgor Mammedov CPUCore *cc = CPU_CORE(dev); 37732e9c10ebSIgor Mammedov const char *base_core_type = spapr_get_cpu_core_type(machine->cpu_type); 3774ff9006ddSIgor Mammedov const char *type = object_get_typename(OBJECT(dev)); 
3775535455fdSIgor Mammedov CPUArchId *core_slot; 3776535455fdSIgor Mammedov int index; 3777fe6b6346SLike Xu unsigned int smp_threads = machine->smp.threads; 3778ff9006ddSIgor Mammedov 3779c5514d0eSIgor Mammedov if (dev->hotplugged && !mc->has_hotpluggable_cpus) { 3780ff9006ddSIgor Mammedov error_setg(&local_err, "CPU hotplug not supported for this machine"); 3781ff9006ddSIgor Mammedov goto out; 3782ff9006ddSIgor Mammedov } 3783ff9006ddSIgor Mammedov 3784ff9006ddSIgor Mammedov if (strcmp(base_core_type, type)) { 3785ff9006ddSIgor Mammedov error_setg(&local_err, "CPU core type should be %s", base_core_type); 3786ff9006ddSIgor Mammedov goto out; 3787ff9006ddSIgor Mammedov } 3788ff9006ddSIgor Mammedov 3789ff9006ddSIgor Mammedov if (cc->core_id % smp_threads) { 3790ff9006ddSIgor Mammedov error_setg(&local_err, "invalid core id %d", cc->core_id); 3791ff9006ddSIgor Mammedov goto out; 3792ff9006ddSIgor Mammedov } 3793ff9006ddSIgor Mammedov 3794459264efSDavid Gibson /* 3795459264efSDavid Gibson * In general we should have homogeneous threads-per-core, but old 3796459264efSDavid Gibson * (pre hotplug support) machine types allow the last core to have 3797459264efSDavid Gibson * reduced threads as a compatibility hack for when we allowed 3798459264efSDavid Gibson * total vcpus not a multiple of threads-per-core. 
3799459264efSDavid Gibson */ 3800459264efSDavid Gibson if (mc->has_hotpluggable_cpus && (cc->nr_threads != smp_threads)) { 3801df8658deSGreg Kurz error_setg(&local_err, "invalid nr-threads %d, must be %d", 38028149e299SDavid Gibson cc->nr_threads, smp_threads); 3803df8658deSGreg Kurz goto out; 38048149e299SDavid Gibson } 38058149e299SDavid Gibson 3806535455fdSIgor Mammedov core_slot = spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index); 3807535455fdSIgor Mammedov if (!core_slot) { 3808ff9006ddSIgor Mammedov error_setg(&local_err, "core id %d out of range", cc->core_id); 3809ff9006ddSIgor Mammedov goto out; 3810ff9006ddSIgor Mammedov } 3811ff9006ddSIgor Mammedov 3812535455fdSIgor Mammedov if (core_slot->cpu) { 3813ff9006ddSIgor Mammedov error_setg(&local_err, "core %d already populated", cc->core_id); 3814ff9006ddSIgor Mammedov goto out; 3815ff9006ddSIgor Mammedov } 3816ff9006ddSIgor Mammedov 3817a0ceb640SIgor Mammedov numa_cpu_pre_plug(core_slot, dev, &local_err); 38180b8497f0SIgor Mammedov 3819ff9006ddSIgor Mammedov out: 3820ff9006ddSIgor Mammedov error_propagate(errp, local_err); 3821ff9006ddSIgor Mammedov } 3822ff9006ddSIgor Mammedov 3823ce2918cbSDavid Gibson int spapr_phb_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr, 3824bb2bdd81SGreg Kurz void *fdt, int *fdt_start_offset, Error **errp) 3825bb2bdd81SGreg Kurz { 3826ce2918cbSDavid Gibson SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(drc->dev); 3827bb2bdd81SGreg Kurz int intc_phandle; 3828bb2bdd81SGreg Kurz 3829bb2bdd81SGreg Kurz intc_phandle = spapr_irq_get_phandle(spapr, spapr->fdt_blob, errp); 3830bb2bdd81SGreg Kurz if (intc_phandle <= 0) { 3831bb2bdd81SGreg Kurz return -1; 3832bb2bdd81SGreg Kurz } 3833bb2bdd81SGreg Kurz 38348cbe71ecSDavid Gibson if (spapr_dt_phb(spapr, sphb, intc_phandle, fdt, fdt_start_offset)) { 3835bb2bdd81SGreg Kurz error_setg(errp, "unable to create FDT node for PHB %d", sphb->index); 3836bb2bdd81SGreg Kurz return -1; 3837bb2bdd81SGreg Kurz } 3838bb2bdd81SGreg Kurz 
3839bb2bdd81SGreg Kurz /* generally SLOF creates these, for hotplug it's up to QEMU */ 3840bb2bdd81SGreg Kurz _FDT(fdt_setprop_string(fdt, *fdt_start_offset, "name", "pci")); 3841bb2bdd81SGreg Kurz 3842bb2bdd81SGreg Kurz return 0; 3843bb2bdd81SGreg Kurz } 3844bb2bdd81SGreg Kurz 3845bb2bdd81SGreg Kurz static void spapr_phb_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev, 3846bb2bdd81SGreg Kurz Error **errp) 3847bb2bdd81SGreg Kurz { 3848ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev)); 3849ce2918cbSDavid Gibson SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(dev); 3850ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); 3851bb2bdd81SGreg Kurz const unsigned windows_supported = spapr_phb_windows_supported(sphb); 3852bb2bdd81SGreg Kurz 3853bb2bdd81SGreg Kurz if (dev->hotplugged && !smc->dr_phb_enabled) { 3854bb2bdd81SGreg Kurz error_setg(errp, "PHB hotplug not supported for this machine"); 3855bb2bdd81SGreg Kurz return; 3856bb2bdd81SGreg Kurz } 3857bb2bdd81SGreg Kurz 3858bb2bdd81SGreg Kurz if (sphb->index == (uint32_t)-1) { 3859bb2bdd81SGreg Kurz error_setg(errp, "\"index\" for PAPR PHB is mandatory"); 3860bb2bdd81SGreg Kurz return; 3861bb2bdd81SGreg Kurz } 3862bb2bdd81SGreg Kurz 3863bb2bdd81SGreg Kurz /* 3864bb2bdd81SGreg Kurz * This will check that sphb->index doesn't exceed the maximum number of 3865bb2bdd81SGreg Kurz * PHBs for the current machine type. 
3866bb2bdd81SGreg Kurz */ 3867bb2bdd81SGreg Kurz smc->phb_placement(spapr, sphb->index, 3868bb2bdd81SGreg Kurz &sphb->buid, &sphb->io_win_addr, 3869bb2bdd81SGreg Kurz &sphb->mem_win_addr, &sphb->mem64_win_addr, 3870ec132efaSAlexey Kardashevskiy windows_supported, sphb->dma_liobn, 3871ec132efaSAlexey Kardashevskiy &sphb->nv2_gpa_win_addr, &sphb->nv2_atsd_win_addr, 3872ec132efaSAlexey Kardashevskiy errp); 3873bb2bdd81SGreg Kurz } 3874bb2bdd81SGreg Kurz 3875bb2bdd81SGreg Kurz static void spapr_phb_plug(HotplugHandler *hotplug_dev, DeviceState *dev, 3876bb2bdd81SGreg Kurz Error **errp) 3877bb2bdd81SGreg Kurz { 3878ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev)); 3879ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); 3880ce2918cbSDavid Gibson SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(dev); 3881ce2918cbSDavid Gibson SpaprDrc *drc; 3882bb2bdd81SGreg Kurz bool hotplugged = spapr_drc_hotplugged(dev); 3883bb2bdd81SGreg Kurz Error *local_err = NULL; 3884bb2bdd81SGreg Kurz 3885bb2bdd81SGreg Kurz if (!smc->dr_phb_enabled) { 3886bb2bdd81SGreg Kurz return; 3887bb2bdd81SGreg Kurz } 3888bb2bdd81SGreg Kurz 3889bb2bdd81SGreg Kurz drc = spapr_drc_by_id(TYPE_SPAPR_DRC_PHB, sphb->index); 3890bb2bdd81SGreg Kurz /* hotplug hooks should check it's enabled before getting this far */ 3891bb2bdd81SGreg Kurz assert(drc); 3892bb2bdd81SGreg Kurz 3893bb2bdd81SGreg Kurz spapr_drc_attach(drc, DEVICE(dev), &local_err); 3894bb2bdd81SGreg Kurz if (local_err) { 3895bb2bdd81SGreg Kurz error_propagate(errp, local_err); 3896bb2bdd81SGreg Kurz return; 3897bb2bdd81SGreg Kurz } 3898bb2bdd81SGreg Kurz 3899bb2bdd81SGreg Kurz if (hotplugged) { 3900bb2bdd81SGreg Kurz spapr_hotplug_req_add_by_index(drc); 3901bb2bdd81SGreg Kurz } else { 3902bb2bdd81SGreg Kurz spapr_drc_reset(drc); 3903bb2bdd81SGreg Kurz } 3904bb2bdd81SGreg Kurz } 3905bb2bdd81SGreg Kurz 3906bb2bdd81SGreg Kurz void spapr_phb_release(DeviceState *dev) 3907bb2bdd81SGreg Kurz { 
3908bb2bdd81SGreg Kurz HotplugHandler *hotplug_ctrl = qdev_get_hotplug_handler(dev); 3909bb2bdd81SGreg Kurz 3910bb2bdd81SGreg Kurz hotplug_handler_unplug(hotplug_ctrl, dev, &error_abort); 391107578b0aSDavid Hildenbrand object_unparent(OBJECT(dev)); 3912bb2bdd81SGreg Kurz } 3913bb2bdd81SGreg Kurz 3914bb2bdd81SGreg Kurz static void spapr_phb_unplug(HotplugHandler *hotplug_dev, DeviceState *dev) 3915bb2bdd81SGreg Kurz { 391607578b0aSDavid Hildenbrand object_property_set_bool(OBJECT(dev), false, "realized", NULL); 3917bb2bdd81SGreg Kurz } 3918bb2bdd81SGreg Kurz 3919bb2bdd81SGreg Kurz static void spapr_phb_unplug_request(HotplugHandler *hotplug_dev, 3920bb2bdd81SGreg Kurz DeviceState *dev, Error **errp) 3921bb2bdd81SGreg Kurz { 3922ce2918cbSDavid Gibson SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(dev); 3923ce2918cbSDavid Gibson SpaprDrc *drc; 3924bb2bdd81SGreg Kurz 3925bb2bdd81SGreg Kurz drc = spapr_drc_by_id(TYPE_SPAPR_DRC_PHB, sphb->index); 3926bb2bdd81SGreg Kurz assert(drc); 3927bb2bdd81SGreg Kurz 3928bb2bdd81SGreg Kurz if (!spapr_drc_unplug_requested(drc)) { 3929bb2bdd81SGreg Kurz spapr_drc_detach(drc); 3930bb2bdd81SGreg Kurz spapr_hotplug_req_remove_by_index(drc); 3931bb2bdd81SGreg Kurz } 3932bb2bdd81SGreg Kurz } 3933bb2bdd81SGreg Kurz 39340fb6bd07SMichael Roth static void spapr_tpm_proxy_plug(HotplugHandler *hotplug_dev, DeviceState *dev, 39350fb6bd07SMichael Roth Error **errp) 39360fb6bd07SMichael Roth { 39370fb6bd07SMichael Roth SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev)); 39380fb6bd07SMichael Roth SpaprTpmProxy *tpm_proxy = SPAPR_TPM_PROXY(dev); 39390fb6bd07SMichael Roth 39400fb6bd07SMichael Roth if (spapr->tpm_proxy != NULL) { 39410fb6bd07SMichael Roth error_setg(errp, "Only one TPM proxy can be specified for this machine"); 39420fb6bd07SMichael Roth return; 39430fb6bd07SMichael Roth } 39440fb6bd07SMichael Roth 39450fb6bd07SMichael Roth spapr->tpm_proxy = tpm_proxy; 39460fb6bd07SMichael Roth } 39470fb6bd07SMichael Roth 39480fb6bd07SMichael 
Roth static void spapr_tpm_proxy_unplug(HotplugHandler *hotplug_dev, DeviceState *dev) 39490fb6bd07SMichael Roth { 39500fb6bd07SMichael Roth SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev)); 39510fb6bd07SMichael Roth 39520fb6bd07SMichael Roth object_property_set_bool(OBJECT(dev), false, "realized", NULL); 39530fb6bd07SMichael Roth object_unparent(OBJECT(dev)); 39540fb6bd07SMichael Roth spapr->tpm_proxy = NULL; 39550fb6bd07SMichael Roth } 39560fb6bd07SMichael Roth 3957c20d332aSBharata B Rao static void spapr_machine_device_plug(HotplugHandler *hotplug_dev, 3958c20d332aSBharata B Rao DeviceState *dev, Error **errp) 3959c20d332aSBharata B Rao { 3960c20d332aSBharata B Rao if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { 396181985f3bSDavid Hildenbrand spapr_memory_plug(hotplug_dev, dev, errp); 3962af81cf32SBharata B Rao } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) { 3963af81cf32SBharata B Rao spapr_core_plug(hotplug_dev, dev, errp); 3964bb2bdd81SGreg Kurz } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) { 3965bb2bdd81SGreg Kurz spapr_phb_plug(hotplug_dev, dev, errp); 39660fb6bd07SMichael Roth } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) { 39670fb6bd07SMichael Roth spapr_tpm_proxy_plug(hotplug_dev, dev, errp); 3968c20d332aSBharata B Rao } 3969c20d332aSBharata B Rao } 3970c20d332aSBharata B Rao 397188432f44SDavid Hildenbrand static void spapr_machine_device_unplug(HotplugHandler *hotplug_dev, 397288432f44SDavid Hildenbrand DeviceState *dev, Error **errp) 397388432f44SDavid Hildenbrand { 39743ec71474SDavid Hildenbrand if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { 39753ec71474SDavid Hildenbrand spapr_memory_unplug(hotplug_dev, dev); 3976a4261be1SDavid Hildenbrand } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) { 3977a4261be1SDavid Hildenbrand spapr_core_unplug(hotplug_dev, dev); 3978bb2bdd81SGreg Kurz } else if (object_dynamic_cast(OBJECT(dev), 
TYPE_SPAPR_PCI_HOST_BRIDGE)) { 3979bb2bdd81SGreg Kurz spapr_phb_unplug(hotplug_dev, dev); 39800fb6bd07SMichael Roth } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) { 39810fb6bd07SMichael Roth spapr_tpm_proxy_unplug(hotplug_dev, dev); 39823ec71474SDavid Hildenbrand } 398388432f44SDavid Hildenbrand } 398488432f44SDavid Hildenbrand 3985cf632463SBharata B Rao static void spapr_machine_device_unplug_request(HotplugHandler *hotplug_dev, 3986cf632463SBharata B Rao DeviceState *dev, Error **errp) 3987cf632463SBharata B Rao { 3988ce2918cbSDavid Gibson SpaprMachineState *sms = SPAPR_MACHINE(OBJECT(hotplug_dev)); 3989c86c1affSDaniel Henrique Barboza MachineClass *mc = MACHINE_GET_CLASS(sms); 3990ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 3991cf632463SBharata B Rao 3992cf632463SBharata B Rao if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { 3993cf632463SBharata B Rao if (spapr_ovec_test(sms->ov5_cas, OV5_HP_EVT)) { 3994cf632463SBharata B Rao spapr_memory_unplug_request(hotplug_dev, dev, errp); 3995cf632463SBharata B Rao } else { 3996cf632463SBharata B Rao /* NOTE: this means there is a window after guest reset, prior to 3997cf632463SBharata B Rao * CAS negotiation, where unplug requests will fail due to the 3998cf632463SBharata B Rao * capability not being detected yet. 
This is a bit different than 3999cf632463SBharata B Rao * the case with PCI unplug, where the events will be queued and 4000cf632463SBharata B Rao * eventually handled by the guest after boot 4001cf632463SBharata B Rao */ 4002cf632463SBharata B Rao error_setg(errp, "Memory hot unplug not supported for this guest"); 4003cf632463SBharata B Rao } 40046f4b5c3eSBharata B Rao } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) { 4005c5514d0eSIgor Mammedov if (!mc->has_hotpluggable_cpus) { 40066f4b5c3eSBharata B Rao error_setg(errp, "CPU hot unplug not supported on this machine"); 40076f4b5c3eSBharata B Rao return; 40086f4b5c3eSBharata B Rao } 4009115debf2SIgor Mammedov spapr_core_unplug_request(hotplug_dev, dev, errp); 4010bb2bdd81SGreg Kurz } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) { 4011bb2bdd81SGreg Kurz if (!smc->dr_phb_enabled) { 4012bb2bdd81SGreg Kurz error_setg(errp, "PHB hot unplug not supported on this machine"); 4013bb2bdd81SGreg Kurz return; 4014bb2bdd81SGreg Kurz } 4015bb2bdd81SGreg Kurz spapr_phb_unplug_request(hotplug_dev, dev, errp); 40160fb6bd07SMichael Roth } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) { 40170fb6bd07SMichael Roth spapr_tpm_proxy_unplug(hotplug_dev, dev); 4018c20d332aSBharata B Rao } 4019c20d332aSBharata B Rao } 4020c20d332aSBharata B Rao 402194a94e4cSBharata B Rao static void spapr_machine_device_pre_plug(HotplugHandler *hotplug_dev, 402294a94e4cSBharata B Rao DeviceState *dev, Error **errp) 402394a94e4cSBharata B Rao { 4024c871bc70SLaurent Vivier if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { 4025c871bc70SLaurent Vivier spapr_memory_pre_plug(hotplug_dev, dev, errp); 4026c871bc70SLaurent Vivier } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) { 402794a94e4cSBharata B Rao spapr_core_pre_plug(hotplug_dev, dev, errp); 4028bb2bdd81SGreg Kurz } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) { 4029bb2bdd81SGreg Kurz 
spapr_phb_pre_plug(hotplug_dev, dev, errp); 403094a94e4cSBharata B Rao } 403194a94e4cSBharata B Rao } 403294a94e4cSBharata B Rao 40337ebaf795SBharata B Rao static HotplugHandler *spapr_get_hotplug_handler(MachineState *machine, 4034c20d332aSBharata B Rao DeviceState *dev) 4035c20d332aSBharata B Rao { 403694a94e4cSBharata B Rao if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM) || 4037bb2bdd81SGreg Kurz object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE) || 40380fb6bd07SMichael Roth object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE) || 40390fb6bd07SMichael Roth object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) { 4040c20d332aSBharata B Rao return HOTPLUG_HANDLER(machine); 4041c20d332aSBharata B Rao } 4042cb600087SDavid Gibson if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) { 4043cb600087SDavid Gibson PCIDevice *pcidev = PCI_DEVICE(dev); 4044cb600087SDavid Gibson PCIBus *root = pci_device_root_bus(pcidev); 4045cb600087SDavid Gibson SpaprPhbState *phb = 4046cb600087SDavid Gibson (SpaprPhbState *)object_dynamic_cast(OBJECT(BUS(root)->parent), 4047cb600087SDavid Gibson TYPE_SPAPR_PCI_HOST_BRIDGE); 4048cb600087SDavid Gibson 4049cb600087SDavid Gibson if (phb) { 4050cb600087SDavid Gibson return HOTPLUG_HANDLER(phb); 4051cb600087SDavid Gibson } 4052cb600087SDavid Gibson } 4053c20d332aSBharata B Rao return NULL; 4054c20d332aSBharata B Rao } 4055c20d332aSBharata B Rao 4056ea089eebSIgor Mammedov static CpuInstanceProperties 4057ea089eebSIgor Mammedov spapr_cpu_index_to_props(MachineState *machine, unsigned cpu_index) 405820bb648dSDavid Gibson { 4059ea089eebSIgor Mammedov CPUArchId *core_slot; 4060ea089eebSIgor Mammedov MachineClass *mc = MACHINE_GET_CLASS(machine); 4061ea089eebSIgor Mammedov 4062ea089eebSIgor Mammedov /* make sure possible_cpu are intialized */ 4063ea089eebSIgor Mammedov mc->possible_cpu_arch_ids(machine); 4064ea089eebSIgor Mammedov /* get CPU core slot containing thread that matches cpu_index */ 4065ea089eebSIgor Mammedov core_slot = 
spapr_find_cpu_slot(machine, cpu_index, NULL); 4066ea089eebSIgor Mammedov assert(core_slot); 4067ea089eebSIgor Mammedov return core_slot->props; 406820bb648dSDavid Gibson } 406920bb648dSDavid Gibson 407079e07936SIgor Mammedov static int64_t spapr_get_default_cpu_node_id(const MachineState *ms, int idx) 407179e07936SIgor Mammedov { 4072aa570207STao Xu return idx / ms->smp.cores % ms->numa_state->num_nodes; 407379e07936SIgor Mammedov } 407479e07936SIgor Mammedov 4075535455fdSIgor Mammedov static const CPUArchIdList *spapr_possible_cpu_arch_ids(MachineState *machine) 4076535455fdSIgor Mammedov { 4077535455fdSIgor Mammedov int i; 4078fe6b6346SLike Xu unsigned int smp_threads = machine->smp.threads; 4079fe6b6346SLike Xu unsigned int smp_cpus = machine->smp.cpus; 4080d342eb76SIgor Mammedov const char *core_type; 4081fe6b6346SLike Xu int spapr_max_cores = machine->smp.max_cpus / smp_threads; 4082535455fdSIgor Mammedov MachineClass *mc = MACHINE_GET_CLASS(machine); 4083535455fdSIgor Mammedov 4084c5514d0eSIgor Mammedov if (!mc->has_hotpluggable_cpus) { 4085535455fdSIgor Mammedov spapr_max_cores = QEMU_ALIGN_UP(smp_cpus, smp_threads) / smp_threads; 4086535455fdSIgor Mammedov } 4087535455fdSIgor Mammedov if (machine->possible_cpus) { 4088535455fdSIgor Mammedov assert(machine->possible_cpus->len == spapr_max_cores); 4089535455fdSIgor Mammedov return machine->possible_cpus; 4090535455fdSIgor Mammedov } 4091535455fdSIgor Mammedov 4092d342eb76SIgor Mammedov core_type = spapr_get_cpu_core_type(machine->cpu_type); 4093d342eb76SIgor Mammedov if (!core_type) { 4094d342eb76SIgor Mammedov error_report("Unable to find sPAPR CPU Core definition"); 4095d342eb76SIgor Mammedov exit(1); 4096d342eb76SIgor Mammedov } 4097d342eb76SIgor Mammedov 4098535455fdSIgor Mammedov machine->possible_cpus = g_malloc0(sizeof(CPUArchIdList) + 4099535455fdSIgor Mammedov sizeof(CPUArchId) * spapr_max_cores); 4100535455fdSIgor Mammedov machine->possible_cpus->len = spapr_max_cores; 4101535455fdSIgor Mammedov 
for (i = 0; i < machine->possible_cpus->len; i++) { 4102535455fdSIgor Mammedov int core_id = i * smp_threads; 4103535455fdSIgor Mammedov 4104d342eb76SIgor Mammedov machine->possible_cpus->cpus[i].type = core_type; 4105f2d672c2SIgor Mammedov machine->possible_cpus->cpus[i].vcpus_count = smp_threads; 4106535455fdSIgor Mammedov machine->possible_cpus->cpus[i].arch_id = core_id; 4107535455fdSIgor Mammedov machine->possible_cpus->cpus[i].props.has_core_id = true; 4108535455fdSIgor Mammedov machine->possible_cpus->cpus[i].props.core_id = core_id; 4109535455fdSIgor Mammedov } 4110535455fdSIgor Mammedov return machine->possible_cpus; 4111535455fdSIgor Mammedov } 4112535455fdSIgor Mammedov 4113ce2918cbSDavid Gibson static void spapr_phb_placement(SpaprMachineState *spapr, uint32_t index, 4114daa23699SDavid Gibson uint64_t *buid, hwaddr *pio, 4115daa23699SDavid Gibson hwaddr *mmio32, hwaddr *mmio64, 4116ec132efaSAlexey Kardashevskiy unsigned n_dma, uint32_t *liobns, 4117ec132efaSAlexey Kardashevskiy hwaddr *nv2gpa, hwaddr *nv2atsd, Error **errp) 41186737d9adSDavid Gibson { 4119357d1e3bSDavid Gibson /* 4120357d1e3bSDavid Gibson * New-style PHB window placement. 4121357d1e3bSDavid Gibson * 4122357d1e3bSDavid Gibson * Goals: Gives large (1TiB), naturally aligned 64-bit MMIO window 4123357d1e3bSDavid Gibson * for each PHB, in addition to 2GiB 32-bit MMIO and 64kiB PIO 4124357d1e3bSDavid Gibson * windows. 4125357d1e3bSDavid Gibson * 4126357d1e3bSDavid Gibson * Some guest kernels can't work with MMIO windows above 1<<46 4127357d1e3bSDavid Gibson * (64TiB), so we place up to 31 PHBs in the area 32TiB..64TiB 4128357d1e3bSDavid Gibson * 4129357d1e3bSDavid Gibson * 32TiB..(33TiB+1984kiB) contains the 64kiB PIO windows for each 4130357d1e3bSDavid Gibson * PHB stacked together. (32TiB+2GiB)..(32TiB+64GiB) contains the 4131357d1e3bSDavid Gibson * 2GiB 32-bit MMIO windows for each PHB. Then 33..64TiB has the 4132357d1e3bSDavid Gibson * 1TiB 64-bit MMIO windows for each PHB. 
4133357d1e3bSDavid Gibson */ 41346737d9adSDavid Gibson const uint64_t base_buid = 0x800000020000000ULL; 41356737d9adSDavid Gibson int i; 41366737d9adSDavid Gibson 4137357d1e3bSDavid Gibson /* Sanity check natural alignments */ 4138357d1e3bSDavid Gibson QEMU_BUILD_BUG_ON((SPAPR_PCI_BASE % SPAPR_PCI_MEM64_WIN_SIZE) != 0); 4139357d1e3bSDavid Gibson QEMU_BUILD_BUG_ON((SPAPR_PCI_LIMIT % SPAPR_PCI_MEM64_WIN_SIZE) != 0); 4140357d1e3bSDavid Gibson QEMU_BUILD_BUG_ON((SPAPR_PCI_MEM64_WIN_SIZE % SPAPR_PCI_MEM32_WIN_SIZE) != 0); 4141357d1e3bSDavid Gibson QEMU_BUILD_BUG_ON((SPAPR_PCI_MEM32_WIN_SIZE % SPAPR_PCI_IO_WIN_SIZE) != 0); 4142357d1e3bSDavid Gibson /* Sanity check bounds */ 414325e6a118SMichael S. Tsirkin QEMU_BUILD_BUG_ON((SPAPR_MAX_PHBS * SPAPR_PCI_IO_WIN_SIZE) > 414425e6a118SMichael S. Tsirkin SPAPR_PCI_MEM32_WIN_SIZE); 414525e6a118SMichael S. Tsirkin QEMU_BUILD_BUG_ON((SPAPR_MAX_PHBS * SPAPR_PCI_MEM32_WIN_SIZE) > 414625e6a118SMichael S. Tsirkin SPAPR_PCI_MEM64_WIN_SIZE); 41472efff1c0SDavid Gibson 414825e6a118SMichael S. Tsirkin if (index >= SPAPR_MAX_PHBS) { 414925e6a118SMichael S. Tsirkin error_setg(errp, "\"index\" for PAPR PHB is too large (max %llu)", 415025e6a118SMichael S. 
Tsirkin SPAPR_MAX_PHBS - 1); 41516737d9adSDavid Gibson return; 41526737d9adSDavid Gibson } 41536737d9adSDavid Gibson 41546737d9adSDavid Gibson *buid = base_buid + index; 41556737d9adSDavid Gibson for (i = 0; i < n_dma; ++i) { 41566737d9adSDavid Gibson liobns[i] = SPAPR_PCI_LIOBN(index, i); 41576737d9adSDavid Gibson } 41586737d9adSDavid Gibson 4159357d1e3bSDavid Gibson *pio = SPAPR_PCI_BASE + index * SPAPR_PCI_IO_WIN_SIZE; 4160357d1e3bSDavid Gibson *mmio32 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM32_WIN_SIZE; 4161357d1e3bSDavid Gibson *mmio64 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM64_WIN_SIZE; 4162ec132efaSAlexey Kardashevskiy 4163ec132efaSAlexey Kardashevskiy *nv2gpa = SPAPR_PCI_NV2RAM64_WIN_BASE + index * SPAPR_PCI_NV2RAM64_WIN_SIZE; 4164ec132efaSAlexey Kardashevskiy *nv2atsd = SPAPR_PCI_NV2ATSD_WIN_BASE + index * SPAPR_PCI_NV2ATSD_WIN_SIZE; 41656737d9adSDavid Gibson } 41666737d9adSDavid Gibson 41677844e12bSCédric Le Goater static ICSState *spapr_ics_get(XICSFabric *dev, int irq) 41687844e12bSCédric Le Goater { 4169ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(dev); 41707844e12bSCédric Le Goater 41717844e12bSCédric Le Goater return ics_valid_irq(spapr->ics, irq) ? spapr->ics : NULL; 41727844e12bSCédric Le Goater } 41737844e12bSCédric Le Goater 41747844e12bSCédric Le Goater static void spapr_ics_resend(XICSFabric *dev) 41757844e12bSCédric Le Goater { 4176ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(dev); 41777844e12bSCédric Le Goater 41787844e12bSCédric Le Goater ics_resend(spapr->ics); 41797844e12bSCédric Le Goater } 41807844e12bSCédric Le Goater 418181210c20SSam Bobroff static ICPState *spapr_icp_get(XICSFabric *xi, int vcpu_id) 4182b2fc59aaSCédric Le Goater { 41832e886fb3SSam Bobroff PowerPCCPU *cpu = spapr_find_cpu(vcpu_id); 4184b2fc59aaSCédric Le Goater 4185a28b9a5aSCédric Le Goater return cpu ? 
spapr_cpu_state(cpu)->icp : NULL; 4186b2fc59aaSCédric Le Goater } 4187b2fc59aaSCédric Le Goater 41886449da45SCédric Le Goater static void spapr_pic_print_info(InterruptStatsProvider *obj, 41896449da45SCédric Le Goater Monitor *mon) 41906449da45SCédric Le Goater { 4191ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(obj); 41926449da45SCédric Le Goater 4193328d8eb2SDavid Gibson spapr_irq_print_info(spapr, mon); 4194f041d6afSGreg Kurz monitor_printf(mon, "irqchip: %s\n", 4195f041d6afSGreg Kurz kvm_irqchip_in_kernel() ? "in-kernel" : "emulated"); 41966449da45SCédric Le Goater } 41976449da45SCédric Le Goater 4198baa45b17SCédric Le Goater /* 4199baa45b17SCédric Le Goater * This is a XIVE only operation 4200baa45b17SCédric Le Goater */ 4201932de7aeSCédric Le Goater static int spapr_match_nvt(XiveFabric *xfb, uint8_t format, 4202932de7aeSCédric Le Goater uint8_t nvt_blk, uint32_t nvt_idx, 4203932de7aeSCédric Le Goater bool cam_ignore, uint8_t priority, 4204932de7aeSCédric Le Goater uint32_t logic_serv, XiveTCTXMatch *match) 4205932de7aeSCédric Le Goater { 4206932de7aeSCédric Le Goater SpaprMachineState *spapr = SPAPR_MACHINE(xfb); 4207baa45b17SCédric Le Goater XivePresenter *xptr = XIVE_PRESENTER(spapr->active_intc); 4208932de7aeSCédric Le Goater XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr); 4209932de7aeSCédric Le Goater int count; 4210932de7aeSCédric Le Goater 4211932de7aeSCédric Le Goater count = xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, cam_ignore, 4212932de7aeSCédric Le Goater priority, logic_serv, match); 4213932de7aeSCédric Le Goater if (count < 0) { 4214932de7aeSCédric Le Goater return count; 4215932de7aeSCédric Le Goater } 4216932de7aeSCédric Le Goater 4217932de7aeSCédric Le Goater /* 4218932de7aeSCédric Le Goater * When we implement the save and restore of the thread interrupt 4219932de7aeSCédric Le Goater * contexts in the enter/exit CPU handlers of the machine and the 4220932de7aeSCédric Le Goater * escalations in QEMU, we should 
be able to handle non dispatched 4221932de7aeSCédric Le Goater * vCPUs. 4222932de7aeSCédric Le Goater * 4223932de7aeSCédric Le Goater * Until this is done, the sPAPR machine should find at least one 4224932de7aeSCédric Le Goater * matching context always. 4225932de7aeSCédric Le Goater */ 4226932de7aeSCédric Le Goater if (count == 0) { 4227932de7aeSCédric Le Goater qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVT %x/%x is not dispatched\n", 4228932de7aeSCédric Le Goater nvt_blk, nvt_idx); 4229932de7aeSCédric Le Goater } 4230932de7aeSCédric Le Goater 4231932de7aeSCédric Le Goater return count; 4232932de7aeSCédric Le Goater } 4233932de7aeSCédric Le Goater 423414bb4486SGreg Kurz int spapr_get_vcpu_id(PowerPCCPU *cpu) 42352e886fb3SSam Bobroff { 4236b1a568c1SGreg Kurz return cpu->vcpu_id; 42372e886fb3SSam Bobroff } 42382e886fb3SSam Bobroff 4239648edb64SGreg Kurz void spapr_set_vcpu_id(PowerPCCPU *cpu, int cpu_index, Error **errp) 4240648edb64SGreg Kurz { 4241ce2918cbSDavid Gibson SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine()); 4242fe6b6346SLike Xu MachineState *ms = MACHINE(spapr); 4243648edb64SGreg Kurz int vcpu_id; 4244648edb64SGreg Kurz 42455d0fb150SGreg Kurz vcpu_id = spapr_vcpu_id(spapr, cpu_index); 4246648edb64SGreg Kurz 4247648edb64SGreg Kurz if (kvm_enabled() && !kvm_vcpu_id_is_valid(vcpu_id)) { 4248648edb64SGreg Kurz error_setg(errp, "Can't create CPU with id %d in KVM", vcpu_id); 4249648edb64SGreg Kurz error_append_hint(errp, "Adjust the number of cpus to %d " 4250648edb64SGreg Kurz "or try to raise the number of threads per core\n", 4251fe6b6346SLike Xu vcpu_id * ms->smp.threads / spapr->vsmt); 4252648edb64SGreg Kurz return; 4253648edb64SGreg Kurz } 4254648edb64SGreg Kurz 4255648edb64SGreg Kurz cpu->vcpu_id = vcpu_id; 4256648edb64SGreg Kurz } 4257648edb64SGreg Kurz 42582e886fb3SSam Bobroff PowerPCCPU *spapr_find_cpu(int vcpu_id) 42592e886fb3SSam Bobroff { 42602e886fb3SSam Bobroff CPUState *cs; 42612e886fb3SSam Bobroff 42622e886fb3SSam Bobroff 
CPU_FOREACH(cs) { 42632e886fb3SSam Bobroff PowerPCCPU *cpu = POWERPC_CPU(cs); 42642e886fb3SSam Bobroff 426514bb4486SGreg Kurz if (spapr_get_vcpu_id(cpu) == vcpu_id) { 42662e886fb3SSam Bobroff return cpu; 42672e886fb3SSam Bobroff } 42682e886fb3SSam Bobroff } 42692e886fb3SSam Bobroff 42702e886fb3SSam Bobroff return NULL; 42712e886fb3SSam Bobroff } 42722e886fb3SSam Bobroff 427303ef074cSNicholas Piggin static void spapr_cpu_exec_enter(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu) 427403ef074cSNicholas Piggin { 427503ef074cSNicholas Piggin SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu); 427603ef074cSNicholas Piggin 427703ef074cSNicholas Piggin /* These are only called by TCG, KVM maintains dispatch state */ 427803ef074cSNicholas Piggin 42793a6e6224SNicholas Piggin spapr_cpu->prod = false; 428003ef074cSNicholas Piggin if (spapr_cpu->vpa_addr) { 428103ef074cSNicholas Piggin CPUState *cs = CPU(cpu); 428203ef074cSNicholas Piggin uint32_t dispatch; 428303ef074cSNicholas Piggin 428403ef074cSNicholas Piggin dispatch = ldl_be_phys(cs->as, 428503ef074cSNicholas Piggin spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER); 428603ef074cSNicholas Piggin dispatch++; 428703ef074cSNicholas Piggin if ((dispatch & 1) != 0) { 428803ef074cSNicholas Piggin qemu_log_mask(LOG_GUEST_ERROR, 428903ef074cSNicholas Piggin "VPA: incorrect dispatch counter value for " 429003ef074cSNicholas Piggin "dispatched partition %u, correcting.\n", dispatch); 429103ef074cSNicholas Piggin dispatch++; 429203ef074cSNicholas Piggin } 429303ef074cSNicholas Piggin stl_be_phys(cs->as, 429403ef074cSNicholas Piggin spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER, dispatch); 429503ef074cSNicholas Piggin } 429603ef074cSNicholas Piggin } 429703ef074cSNicholas Piggin 429803ef074cSNicholas Piggin static void spapr_cpu_exec_exit(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu) 429903ef074cSNicholas Piggin { 430003ef074cSNicholas Piggin SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu); 430103ef074cSNicholas Piggin 430203ef074cSNicholas Piggin 
if (spapr_cpu->vpa_addr) { 430303ef074cSNicholas Piggin CPUState *cs = CPU(cpu); 430403ef074cSNicholas Piggin uint32_t dispatch; 430503ef074cSNicholas Piggin 430603ef074cSNicholas Piggin dispatch = ldl_be_phys(cs->as, 430703ef074cSNicholas Piggin spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER); 430803ef074cSNicholas Piggin dispatch++; 430903ef074cSNicholas Piggin if ((dispatch & 1) != 1) { 431003ef074cSNicholas Piggin qemu_log_mask(LOG_GUEST_ERROR, 431103ef074cSNicholas Piggin "VPA: incorrect dispatch counter value for " 431203ef074cSNicholas Piggin "preempted partition %u, correcting.\n", dispatch); 431303ef074cSNicholas Piggin dispatch++; 431403ef074cSNicholas Piggin } 431503ef074cSNicholas Piggin stl_be_phys(cs->as, 431603ef074cSNicholas Piggin spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER, dispatch); 431703ef074cSNicholas Piggin } 431803ef074cSNicholas Piggin } 431903ef074cSNicholas Piggin 432029ee3247SAlexey Kardashevskiy static void spapr_machine_class_init(ObjectClass *oc, void *data) 432153018216SPaolo Bonzini { 432229ee3247SAlexey Kardashevskiy MachineClass *mc = MACHINE_CLASS(oc); 4323ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(oc); 432471461b0fSAlexey Kardashevskiy FWPathProviderClass *fwc = FW_PATH_PROVIDER_CLASS(oc); 432534316482SAlexey Kardashevskiy NMIClass *nc = NMI_CLASS(oc); 4326c20d332aSBharata B Rao HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc); 43271d1be34dSDavid Gibson PPCVirtualHypervisorClass *vhc = PPC_VIRTUAL_HYPERVISOR_CLASS(oc); 43287844e12bSCédric Le Goater XICSFabricClass *xic = XICS_FABRIC_CLASS(oc); 43296449da45SCédric Le Goater InterruptStatsProviderClass *ispc = INTERRUPT_STATS_PROVIDER_CLASS(oc); 4330932de7aeSCédric Le Goater XiveFabricClass *xfc = XIVE_FABRIC_CLASS(oc); 433129ee3247SAlexey Kardashevskiy 43320eb9054cSDavid Gibson mc->desc = "pSeries Logical Partition (PAPR compliant)"; 4333907aac2fSMark Cave-Ayland mc->ignore_boot_device_suffixes = true; 4334fc9f38c3SDavid Gibson 4335fc9f38c3SDavid Gibson /* 
/*
 * Class init for the abstract TYPE_SPAPR_MACHINE. Wires up every
 * interface the machine implements (hotplug, NMI, vhyp, XICS/XIVE
 * fabrics, interrupt stats) and sets the defaults for the *latest*
 * machine version; older versioned types override below.
 */
static void spapr_machine_class_init(ObjectClass *oc, void *data)
{
    MachineClass *mc = MACHINE_CLASS(oc);
    SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(oc);
    FWPathProviderClass *fwc = FW_PATH_PROVIDER_CLASS(oc);
    NMIClass *nc = NMI_CLASS(oc);
    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc);
    PPCVirtualHypervisorClass *vhc = PPC_VIRTUAL_HYPERVISOR_CLASS(oc);
    XICSFabricClass *xic = XICS_FABRIC_CLASS(oc);
    InterruptStatsProviderClass *ispc = INTERRUPT_STATS_PROVIDER_CLASS(oc);
    XiveFabricClass *xfc = XIVE_FABRIC_CLASS(oc);

    mc->desc = "pSeries Logical Partition (PAPR compliant)";
    mc->ignore_boot_device_suffixes = true;

    /*
     * We set up the default / latest behaviour here. The class_init
     * functions for the specific versioned machine types can override
     * these details for backwards compatibility
     */
    mc->init = spapr_machine_init;
    mc->reset = spapr_machine_reset;
    mc->block_default_type = IF_SCSI;
    mc->max_cpus = 1024;
    mc->no_parallel = 1;
    mc->default_boot_order = "";
    mc->default_ram_size = 512 * MiB;
    mc->default_display = "std";
    mc->kvm_type = spapr_kvm_type;
    machine_class_allow_dynamic_sysbus_dev(mc, TYPE_SPAPR_PCI_HOST_BRIDGE);
    mc->pci_allow_0_address = true;
    /* Guard against a parent class having already installed a handler */
    assert(!mc->get_hotplug_handler);
    mc->get_hotplug_handler = spapr_get_hotplug_handler;
    hc->pre_plug = spapr_machine_device_pre_plug;
    hc->plug = spapr_machine_device_plug;
    mc->cpu_index_to_instance_props = spapr_cpu_index_to_props;
    mc->get_default_cpu_node_id = spapr_get_default_cpu_node_id;
    mc->possible_cpu_arch_ids = spapr_possible_cpu_arch_ids;
    hc->unplug_request = spapr_machine_device_unplug_request;
    hc->unplug = spapr_machine_device_unplug;

    smc->dr_lmb_enabled = true;
    smc->update_dt_enabled = true;
    mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power9_v2.0");
    mc->has_hotpluggable_cpus = true;
    smc->resize_hpt_default = SPAPR_RESIZE_HPT_ENABLED;
    fwc->get_dev_path = spapr_get_fw_dev_path;
    nc->nmi_monitor_handler = spapr_nmi;
    smc->phb_placement = spapr_phb_placement;
    /* Virtual-hypervisor callbacks used by TCG's hypercall/MMU paths */
    vhc->hypercall = emulate_spapr_hypercall;
    vhc->hpt_mask = spapr_hpt_mask;
    vhc->map_hptes = spapr_map_hptes;
    vhc->unmap_hptes = spapr_unmap_hptes;
    vhc->hpte_set_c = spapr_hpte_set_c;
    vhc->hpte_set_r = spapr_hpte_set_r;
    vhc->get_pate = spapr_get_pate;
    vhc->encode_hpt_for_kvm_pr = spapr_encode_hpt_for_kvm_pr;
    vhc->cpu_exec_enter = spapr_cpu_exec_enter;
    vhc->cpu_exec_exit = spapr_cpu_exec_exit;
    xic->ics_get = spapr_ics_get;
    xic->ics_resend = spapr_ics_resend;
    xic->icp_get = spapr_icp_get;
    ispc->print_info = spapr_pic_print_info;
    /* Force NUMA node memory size to be a multiple of
     * SPAPR_MEMORY_BLOCK_SIZE (256M) since that's the granularity
     * in which LMBs are represented and hot-added
     */
    mc->numa_mem_align_shift = 28;
    mc->numa_mem_supported = true;
    mc->auto_enable_numa = true;

    /* Default sPAPR capability settings for the latest machine */
    smc->default_caps.caps[SPAPR_CAP_HTM] = SPAPR_CAP_OFF;
    smc->default_caps.caps[SPAPR_CAP_VSX] = SPAPR_CAP_ON;
    smc->default_caps.caps[SPAPR_CAP_DFP] = SPAPR_CAP_ON;
    smc->default_caps.caps[SPAPR_CAP_CFPC] = SPAPR_CAP_WORKAROUND;
    smc->default_caps.caps[SPAPR_CAP_SBBC] = SPAPR_CAP_WORKAROUND;
    smc->default_caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_WORKAROUND;
    smc->default_caps.caps[SPAPR_CAP_HPT_MAXPAGESIZE] = 16; /* 64kiB */
    smc->default_caps.caps[SPAPR_CAP_NESTED_KVM_HV] = SPAPR_CAP_OFF;
    smc->default_caps.caps[SPAPR_CAP_LARGE_DECREMENTER] = SPAPR_CAP_ON;
    smc->default_caps.caps[SPAPR_CAP_CCF_ASSIST] = SPAPR_CAP_OFF;
    spapr_caps_add_properties(smc, &error_abort);
    smc->irq = &spapr_irq_dual;
    smc->dr_phb_enabled = true;
    smc->linux_pci_probe = true;
    smc->smp_threads_vsmt = true;
    smc->nr_xirqs = SPAPR_NR_XIRQS;
    xfc->match_nvt = spapr_match_nvt;
}

/* Abstract base QOM type; versioned pseries-* machines derive from it. */
static const TypeInfo spapr_machine_info = {
    .name          = TYPE_SPAPR_MACHINE,
    .parent        = TYPE_MACHINE,
    .abstract      = true,
    .instance_size = sizeof(SpaprMachineState),
    .instance_init = spapr_instance_init,
    .instance_finalize = spapr_machine_finalizefn,
    .class_size    = sizeof(SpaprMachineClass),
    .class_init    = spapr_machine_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_FW_PATH_PROVIDER },
        { TYPE_NMI },
        { TYPE_HOTPLUG_HANDLER },
        { TYPE_PPC_VIRTUAL_HYPERVISOR },
        { TYPE_XICS_FABRIC },
        { TYPE_INTERRUPT_STATS_PROVIDER },
        { TYPE_XIVE_FABRIC },
        { }
    },
};
TYPE_PPC_VIRTUAL_HYPERVISOR }, 44247844e12bSCédric Le Goater { TYPE_XICS_FABRIC }, 44256449da45SCédric Le Goater { TYPE_INTERRUPT_STATS_PROVIDER }, 4426932de7aeSCédric Le Goater { TYPE_XIVE_FABRIC }, 442771461b0fSAlexey Kardashevskiy { } 442871461b0fSAlexey Kardashevskiy }, 442929ee3247SAlexey Kardashevskiy }; 443029ee3247SAlexey Kardashevskiy 4431fccbc785SDavid Gibson #define DEFINE_SPAPR_MACHINE(suffix, verstr, latest) \ 44325013c547SDavid Gibson static void spapr_machine_##suffix##_class_init(ObjectClass *oc, \ 44335013c547SDavid Gibson void *data) \ 44345013c547SDavid Gibson { \ 44355013c547SDavid Gibson MachineClass *mc = MACHINE_CLASS(oc); \ 44365013c547SDavid Gibson spapr_machine_##suffix##_class_options(mc); \ 4437fccbc785SDavid Gibson if (latest) { \ 4438fccbc785SDavid Gibson mc->alias = "pseries"; \ 4439fccbc785SDavid Gibson mc->is_default = 1; \ 4440fccbc785SDavid Gibson } \ 44415013c547SDavid Gibson } \ 44425013c547SDavid Gibson static const TypeInfo spapr_machine_##suffix##_info = { \ 44435013c547SDavid Gibson .name = MACHINE_TYPE_NAME("pseries-" verstr), \ 44445013c547SDavid Gibson .parent = TYPE_SPAPR_MACHINE, \ 44455013c547SDavid Gibson .class_init = spapr_machine_##suffix##_class_init, \ 44465013c547SDavid Gibson }; \ 44475013c547SDavid Gibson static void spapr_machine_register_##suffix(void) \ 44485013c547SDavid Gibson { \ 44495013c547SDavid Gibson type_register(&spapr_machine_##suffix##_info); \ 44505013c547SDavid Gibson } \ 44510e6aac87SEduardo Habkost type_init(spapr_machine_register_##suffix) 44525013c547SDavid Gibson 44531c5f29bbSDavid Gibson /* 44543eb74d20SCornelia Huck * pseries-5.0 44553eb74d20SCornelia Huck */ 44563eb74d20SCornelia Huck static void spapr_machine_5_0_class_options(MachineClass *mc) 44573eb74d20SCornelia Huck { 44583eb74d20SCornelia Huck /* Defaults for the latest behaviour inherited from the base class */ 44593eb74d20SCornelia Huck } 44603eb74d20SCornelia Huck 44613eb74d20SCornelia Huck DEFINE_SPAPR_MACHINE(5_0, "5.0", 
true); 44623eb74d20SCornelia Huck 44633eb74d20SCornelia Huck /* 44649aec2e52SCornelia Huck * pseries-4.2 4465e2676b16SGreg Kurz */ 44669aec2e52SCornelia Huck static void spapr_machine_4_2_class_options(MachineClass *mc) 4467e2676b16SGreg Kurz { 44683eb74d20SCornelia Huck spapr_machine_5_0_class_options(mc); 44695f258577SEvgeny Yakovlev compat_props_add(mc->compat_props, hw_compat_4_2, hw_compat_4_2_len); 4470e2676b16SGreg Kurz } 4471e2676b16SGreg Kurz 44723eb74d20SCornelia Huck DEFINE_SPAPR_MACHINE(4_2, "4.2", false); 44739aec2e52SCornelia Huck 44749aec2e52SCornelia Huck /* 44759aec2e52SCornelia Huck * pseries-4.1 44769aec2e52SCornelia Huck */ 44779aec2e52SCornelia Huck static void spapr_machine_4_1_class_options(MachineClass *mc) 44789aec2e52SCornelia Huck { 44796c3829a2SAlexey Kardashevskiy SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 4480d15d4ad6SDavid Gibson static GlobalProperty compat[] = { 4481d15d4ad6SDavid Gibson /* Only allow 4kiB and 64kiB IOMMU pagesizes */ 4482d15d4ad6SDavid Gibson { TYPE_SPAPR_PCI_HOST_BRIDGE, "pgsz", "0x11000" }, 4483d15d4ad6SDavid Gibson }; 4484d15d4ad6SDavid Gibson 44859aec2e52SCornelia Huck spapr_machine_4_2_class_options(mc); 44866c3829a2SAlexey Kardashevskiy smc->linux_pci_probe = false; 448729cb4187SGreg Kurz smc->smp_threads_vsmt = false; 44889aec2e52SCornelia Huck compat_props_add(mc->compat_props, hw_compat_4_1, hw_compat_4_1_len); 4489d15d4ad6SDavid Gibson compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); 44909aec2e52SCornelia Huck } 44919aec2e52SCornelia Huck 44929aec2e52SCornelia Huck DEFINE_SPAPR_MACHINE(4_1, "4.1", false); 44939bf2650bSCornelia Huck 44949bf2650bSCornelia Huck /* 44959bf2650bSCornelia Huck * pseries-4.0 44969bf2650bSCornelia Huck */ 4497eb3cba82SDavid Gibson static void phb_placement_4_0(SpaprMachineState *spapr, uint32_t index, 4498ec132efaSAlexey Kardashevskiy uint64_t *buid, hwaddr *pio, 4499ec132efaSAlexey Kardashevskiy hwaddr *mmio32, hwaddr *mmio64, 4500ec132efaSAlexey 
Kardashevskiy unsigned n_dma, uint32_t *liobns, 4501ec132efaSAlexey Kardashevskiy hwaddr *nv2gpa, hwaddr *nv2atsd, Error **errp) 4502ec132efaSAlexey Kardashevskiy { 4503ec132efaSAlexey Kardashevskiy spapr_phb_placement(spapr, index, buid, pio, mmio32, mmio64, n_dma, liobns, 4504ec132efaSAlexey Kardashevskiy nv2gpa, nv2atsd, errp); 4505ec132efaSAlexey Kardashevskiy *nv2gpa = 0; 4506ec132efaSAlexey Kardashevskiy *nv2atsd = 0; 4507ec132efaSAlexey Kardashevskiy } 4508ec132efaSAlexey Kardashevskiy 4509eb3cba82SDavid Gibson static void spapr_machine_4_0_class_options(MachineClass *mc) 4510eb3cba82SDavid Gibson { 4511eb3cba82SDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 4512eb3cba82SDavid Gibson 4513eb3cba82SDavid Gibson spapr_machine_4_1_class_options(mc); 4514eb3cba82SDavid Gibson compat_props_add(mc->compat_props, hw_compat_4_0, hw_compat_4_0_len); 4515eb3cba82SDavid Gibson smc->phb_placement = phb_placement_4_0; 4516bd94bc06SCédric Le Goater smc->irq = &spapr_irq_xics; 45173725ef1aSGreg Kurz smc->pre_4_1_migration = true; 4518eb3cba82SDavid Gibson } 4519eb3cba82SDavid Gibson 4520eb3cba82SDavid Gibson DEFINE_SPAPR_MACHINE(4_0, "4.0", false); 4521eb3cba82SDavid Gibson 4522eb3cba82SDavid Gibson /* 4523eb3cba82SDavid Gibson * pseries-3.1 4524eb3cba82SDavid Gibson */ 452588cbe073SMarc-André Lureau static void spapr_machine_3_1_class_options(MachineClass *mc) 452688cbe073SMarc-André Lureau { 4527ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 4528fea35ca4SAlexey Kardashevskiy 452984e060bfSAlex Williamson spapr_machine_4_0_class_options(mc); 4530abd93cc7SMarc-André Lureau compat_props_add(mc->compat_props, hw_compat_3_1, hw_compat_3_1_len); 453127461d69SPrasad J Pandit 453234a6b015SCédric Le Goater mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power8_v2.0"); 4533fea35ca4SAlexey Kardashevskiy smc->update_dt_enabled = false; 4534dae5e39aSMichael Roth smc->dr_phb_enabled = false; 45350a794529SDavid Gibson smc->broken_host_serial_model = 
true; 45362782ad4cSSuraj Jitindar Singh smc->default_caps.caps[SPAPR_CAP_CFPC] = SPAPR_CAP_BROKEN; 45372782ad4cSSuraj Jitindar Singh smc->default_caps.caps[SPAPR_CAP_SBBC] = SPAPR_CAP_BROKEN; 45382782ad4cSSuraj Jitindar Singh smc->default_caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_BROKEN; 4539edaa7995SSuraj Jitindar Singh smc->default_caps.caps[SPAPR_CAP_LARGE_DECREMENTER] = SPAPR_CAP_OFF; 454084e060bfSAlex Williamson } 454184e060bfSAlex Williamson 454284e060bfSAlex Williamson DEFINE_SPAPR_MACHINE(3_1, "3.1", false); 4543d45360d9SCédric Le Goater 4544d45360d9SCédric Le Goater /* 4545d45360d9SCédric Le Goater * pseries-3.0 4546d45360d9SCédric Le Goater */ 4547d45360d9SCédric Le Goater 4548d45360d9SCédric Le Goater static void spapr_machine_3_0_class_options(MachineClass *mc) 4549d45360d9SCédric Le Goater { 4550ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 455182cffa2eSCédric Le Goater 4552d45360d9SCédric Le Goater spapr_machine_3_1_class_options(mc); 4553ddb3235dSMarc-André Lureau compat_props_add(mc->compat_props, hw_compat_3_0, hw_compat_3_0_len); 455482cffa2eSCédric Le Goater 455582cffa2eSCédric Le Goater smc->legacy_irq_allocation = true; 455654255c1fSDavid Gibson smc->nr_xirqs = 0x400; 4557ae837402SCédric Le Goater smc->irq = &spapr_irq_xics_legacy; 4558d45360d9SCédric Le Goater } 4559d45360d9SCédric Le Goater 4560d45360d9SCédric Le Goater DEFINE_SPAPR_MACHINE(3_0, "3.0", false); 45618a4fd427SDavid Gibson 45628a4fd427SDavid Gibson /* 45638a4fd427SDavid Gibson * pseries-2.12 45648a4fd427SDavid Gibson */ 456588cbe073SMarc-André Lureau static void spapr_machine_2_12_class_options(MachineClass *mc) 456688cbe073SMarc-André Lureau { 4567ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 456888cbe073SMarc-André Lureau static GlobalProperty compat[] = { 45696c36bddfSEduardo Habkost { TYPE_POWERPC_CPU, "pre-3.0-migration", "on" }, 45706c36bddfSEduardo Habkost { TYPE_SPAPR_CPU_CORE, "pre-3.0-migration", "on" }, 
4571fa386d98SMarc-André Lureau }; 45728a4fd427SDavid Gibson 4573d8c0c7afSPeter Maydell spapr_machine_3_0_class_options(mc); 45740d47310bSMarc-André Lureau compat_props_add(mc->compat_props, hw_compat_2_12, hw_compat_2_12_len); 457588cbe073SMarc-André Lureau compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); 45762309832aSDavid Gibson 4577e8937295SGreg Kurz /* We depend on kvm_enabled() to choose a default value for the 4578e8937295SGreg Kurz * hpt-max-page-size capability. Of course we can't do it here 4579e8937295SGreg Kurz * because this is too early and the HW accelerator isn't initialzed 4580e8937295SGreg Kurz * yet. Postpone this to machine init (see default_caps_with_cpu()). 4581e8937295SGreg Kurz */ 4582e8937295SGreg Kurz smc->default_caps.caps[SPAPR_CAP_HPT_MAXPAGESIZE] = 0; 45838a4fd427SDavid Gibson } 45848a4fd427SDavid Gibson 45858a4fd427SDavid Gibson DEFINE_SPAPR_MACHINE(2_12, "2.12", false); 45862b615412SDavid Gibson 4587813f3cf6SSuraj Jitindar Singh static void spapr_machine_2_12_sxxm_class_options(MachineClass *mc) 4588813f3cf6SSuraj Jitindar Singh { 4589ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 4590813f3cf6SSuraj Jitindar Singh 4591813f3cf6SSuraj Jitindar Singh spapr_machine_2_12_class_options(mc); 4592813f3cf6SSuraj Jitindar Singh smc->default_caps.caps[SPAPR_CAP_CFPC] = SPAPR_CAP_WORKAROUND; 4593813f3cf6SSuraj Jitindar Singh smc->default_caps.caps[SPAPR_CAP_SBBC] = SPAPR_CAP_WORKAROUND; 4594813f3cf6SSuraj Jitindar Singh smc->default_caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_FIXED_CCD; 4595813f3cf6SSuraj Jitindar Singh } 4596813f3cf6SSuraj Jitindar Singh 4597813f3cf6SSuraj Jitindar Singh DEFINE_SPAPR_MACHINE(2_12_sxxm, "2.12-sxxm", false); 4598813f3cf6SSuraj Jitindar Singh 45992b615412SDavid Gibson /* 46002b615412SDavid Gibson * pseries-2.11 46012b615412SDavid Gibson */ 46022b615412SDavid Gibson 46032b615412SDavid Gibson static void spapr_machine_2_11_class_options(MachineClass *mc) 46042b615412SDavid Gibson { 
4605ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 4606ee76a09fSDavid Gibson 46072b615412SDavid Gibson spapr_machine_2_12_class_options(mc); 46084e5fe368SSuraj Jitindar Singh smc->default_caps.caps[SPAPR_CAP_HTM] = SPAPR_CAP_ON; 460943df70a9SMarc-André Lureau compat_props_add(mc->compat_props, hw_compat_2_11, hw_compat_2_11_len); 46102b615412SDavid Gibson } 46112b615412SDavid Gibson 46122b615412SDavid Gibson DEFINE_SPAPR_MACHINE(2_11, "2.11", false); 4613e2676b16SGreg Kurz 4614e2676b16SGreg Kurz /* 46153fa14fbeSDavid Gibson * pseries-2.10 4616db800b21SDavid Gibson */ 4617e2676b16SGreg Kurz 46183fa14fbeSDavid Gibson static void spapr_machine_2_10_class_options(MachineClass *mc) 4619db800b21SDavid Gibson { 4620e2676b16SGreg Kurz spapr_machine_2_11_class_options(mc); 4621503224f4SMarc-André Lureau compat_props_add(mc->compat_props, hw_compat_2_10, hw_compat_2_10_len); 4622db800b21SDavid Gibson } 4623db800b21SDavid Gibson 4624e2676b16SGreg Kurz DEFINE_SPAPR_MACHINE(2_10, "2.10", false); 46253fa14fbeSDavid Gibson 46263fa14fbeSDavid Gibson /* 46273fa14fbeSDavid Gibson * pseries-2.9 46283fa14fbeSDavid Gibson */ 462988cbe073SMarc-André Lureau 463088cbe073SMarc-André Lureau static void spapr_machine_2_9_class_options(MachineClass *mc) 463188cbe073SMarc-André Lureau { 4632ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 463388cbe073SMarc-André Lureau static GlobalProperty compat[] = { 46346c36bddfSEduardo Habkost { TYPE_POWERPC_CPU, "pre-2.10-migration", "on" }, 4635fa386d98SMarc-André Lureau }; 46363fa14fbeSDavid Gibson 46373fa14fbeSDavid Gibson spapr_machine_2_10_class_options(mc); 46383e803152SMarc-André Lureau compat_props_add(mc->compat_props, hw_compat_2_9, hw_compat_2_9_len); 463988cbe073SMarc-André Lureau compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); 46403bfe5716SLaurent Vivier mc->numa_auto_assign_ram = numa_legacy_auto_assign_ram; 464146f7afa3SGreg Kurz smc->pre_2_10_has_unused_icps = true; 
/*
 * pseries-2.8
 */

static void spapr_machine_2_8_class_options(MachineClass *mc)
{
    static GlobalProperty compat[] = {
        { TYPE_SPAPR_PCI_HOST_BRIDGE, "pcie-extended-configuration-space", "off" },
    };

    spapr_machine_2_9_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_2_8, hw_compat_2_8_len);
    compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
    mc->numa_mem_align_shift = 23;
}

DEFINE_SPAPR_MACHINE(2_8, "2.8", false);

/*
 * pseries-2.7
 */

/*
 * Compute the BUID, PIO/MMIO window bases and LIOBNs for PHB @index
 * using the pre-2.8 layout: PHBs placed at fixed 64 GiB strides above
 * RAM, aligned to 1 TiB. Sets *errp if @index exceeds the legacy limit.
 */
static void phb_placement_2_7(SpaprMachineState *spapr, uint32_t index,
                              uint64_t *buid, hwaddr *pio,
                              hwaddr *mmio32, hwaddr *mmio64,
                              unsigned n_dma, uint32_t *liobns,
                              hwaddr *nv2gpa, hwaddr *nv2atsd, Error **errp)
{
    /* Legacy PHB placement for pseries-2.7 and earlier machine types */
    const uint64_t base_buid = 0x800000020000000ULL;
    const hwaddr phb_spacing = 0x1000000000ULL; /* 64 GiB */
    const hwaddr mmio_offset = 0xa0000000; /* 2 GiB + 512 MiB */
    const hwaddr pio_offset = 0x80000000; /* 2 GiB */
    const uint32_t max_index = 255;
    const hwaddr phb0_alignment = 0x10000000000ULL; /* 1 TiB */

    uint64_t ram_top = MACHINE(spapr)->ram_size;
    hwaddr phb0_base, phb_base;
    int i;

    /* Do we have device memory? */
    if (MACHINE(spapr)->maxram_size > ram_top) {
        /* Can't just use maxram_size, because there may be an
         * alignment gap between normal and device memory regions
         */
        ram_top = MACHINE(spapr)->device_memory->base +
            memory_region_size(&MACHINE(spapr)->device_memory->mr);
    }

    phb0_base = QEMU_ALIGN_UP(ram_top, phb0_alignment);

    if (index > max_index) {
        error_setg(errp, "\"index\" for PAPR PHB is too large (max %u)",
                   max_index);
        return;
    }

    *buid = base_buid + index;
    for (i = 0; i < n_dma; ++i) {
        liobns[i] = SPAPR_PCI_LIOBN(index, i);
    }

    phb_base = phb0_base + index * phb_spacing;
    *pio = phb_base + pio_offset;
    *mmio32 = phb_base + mmio_offset;
    /*
     * We don't set the 64-bit MMIO window, relying on the PHB's
     * fallback behaviour of automatically splitting a large "32-bit"
     * window into contiguous 32-bit and 64-bit windows
     */

    /* No NVLink2 support on this legacy layout */
    *nv2gpa = 0;
    *nv2atsd = 0;
}
large "32-bit" 4715357d1e3bSDavid Gibson * window into contiguous 32-bit and 64-bit windows 4716357d1e3bSDavid Gibson */ 4717ec132efaSAlexey Kardashevskiy 4718ec132efaSAlexey Kardashevskiy *nv2gpa = 0; 4719ec132efaSAlexey Kardashevskiy *nv2atsd = 0; 4720357d1e3bSDavid Gibson } 4721db800b21SDavid Gibson 47221ea1eefcSBharata B Rao static void spapr_machine_2_7_class_options(MachineClass *mc) 47231ea1eefcSBharata B Rao { 4724ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 472588cbe073SMarc-André Lureau static GlobalProperty compat[] = { 47266c36bddfSEduardo Habkost { TYPE_SPAPR_PCI_HOST_BRIDGE, "mem_win_size", "0xf80000000", }, 47276c36bddfSEduardo Habkost { TYPE_SPAPR_PCI_HOST_BRIDGE, "mem64_win_size", "0", }, 47286c36bddfSEduardo Habkost { TYPE_POWERPC_CPU, "pre-2.8-migration", "on", }, 47296c36bddfSEduardo Habkost { TYPE_SPAPR_PCI_HOST_BRIDGE, "pre-2.8-migration", "on", }, 473088cbe073SMarc-André Lureau }; 47313daa4a9fSThomas Huth 4732db800b21SDavid Gibson spapr_machine_2_8_class_options(mc); 47332e9c10ebSIgor Mammedov mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power7_v2.3"); 4734a140c199SEduardo Habkost mc->default_machine_opts = "modern-hotplug-events=off"; 47355a995064SMarc-André Lureau compat_props_add(mc->compat_props, hw_compat_2_7, hw_compat_2_7_len); 473688cbe073SMarc-André Lureau compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); 4737357d1e3bSDavid Gibson smc->phb_placement = phb_placement_2_7; 47381ea1eefcSBharata B Rao } 47391ea1eefcSBharata B Rao 4740db800b21SDavid Gibson DEFINE_SPAPR_MACHINE(2_7, "2.7", false); 47411ea1eefcSBharata B Rao 47421ea1eefcSBharata B Rao /* 47434b23699cSDavid Gibson * pseries-2.6 47444b23699cSDavid Gibson */ 474588cbe073SMarc-André Lureau 474688cbe073SMarc-André Lureau static void spapr_machine_2_6_class_options(MachineClass *mc) 474788cbe073SMarc-André Lureau { 474888cbe073SMarc-André Lureau static GlobalProperty compat[] = { 47496c36bddfSEduardo Habkost { TYPE_SPAPR_PCI_HOST_BRIDGE, 
"ddw", "off" }, 4750fa386d98SMarc-André Lureau }; 47511ea1eefcSBharata B Rao 47521ea1eefcSBharata B Rao spapr_machine_2_7_class_options(mc); 4753c5514d0eSIgor Mammedov mc->has_hotpluggable_cpus = false; 4754ff8f261fSMarc-André Lureau compat_props_add(mc->compat_props, hw_compat_2_6, hw_compat_2_6_len); 475588cbe073SMarc-André Lureau compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); 47564b23699cSDavid Gibson } 47574b23699cSDavid Gibson 47581ea1eefcSBharata B Rao DEFINE_SPAPR_MACHINE(2_6, "2.6", false); 47594b23699cSDavid Gibson 47604b23699cSDavid Gibson /* 47611c5f29bbSDavid Gibson * pseries-2.5 47621c5f29bbSDavid Gibson */ 476388cbe073SMarc-André Lureau 476488cbe073SMarc-André Lureau static void spapr_machine_2_5_class_options(MachineClass *mc) 476588cbe073SMarc-André Lureau { 4766ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 476788cbe073SMarc-André Lureau static GlobalProperty compat[] = { 47686c36bddfSEduardo Habkost { "spapr-vlan", "use-rx-buffer-pools", "off" }, 4769fa386d98SMarc-André Lureau }; 47704b23699cSDavid Gibson 47714b23699cSDavid Gibson spapr_machine_2_6_class_options(mc); 477257040d45SThomas Huth smc->use_ohci_by_default = true; 4773fe759610SMarc-André Lureau compat_props_add(mc->compat_props, hw_compat_2_5, hw_compat_2_5_len); 477488cbe073SMarc-André Lureau compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); 47751c5f29bbSDavid Gibson } 47761c5f29bbSDavid Gibson 47774b23699cSDavid Gibson DEFINE_SPAPR_MACHINE(2_5, "2.5", false); 47781c5f29bbSDavid Gibson 47791c5f29bbSDavid Gibson /* 47801c5f29bbSDavid Gibson * pseries-2.4 47811c5f29bbSDavid Gibson */ 478280fd50f9SCornelia Huck 47835013c547SDavid Gibson static void spapr_machine_2_4_class_options(MachineClass *mc) 47845013c547SDavid Gibson { 4785ce2918cbSDavid Gibson SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); 4786fc9f38c3SDavid Gibson 4787fc9f38c3SDavid Gibson spapr_machine_2_5_class_options(mc); 4788fc9f38c3SDavid Gibson 
smc->dr_lmb_enabled = false; 47892f99b9c2SMarc-André Lureau compat_props_add(mc->compat_props, hw_compat_2_4, hw_compat_2_4_len); 47901c5f29bbSDavid Gibson } 47911c5f29bbSDavid Gibson 4792fccbc785SDavid Gibson DEFINE_SPAPR_MACHINE(2_4, "2.4", false); 47931c5f29bbSDavid Gibson 47941c5f29bbSDavid Gibson /* 47951c5f29bbSDavid Gibson * pseries-2.3 47961c5f29bbSDavid Gibson */ 479788cbe073SMarc-André Lureau 479888cbe073SMarc-André Lureau static void spapr_machine_2_3_class_options(MachineClass *mc) 479988cbe073SMarc-André Lureau { 480088cbe073SMarc-André Lureau static GlobalProperty compat[] = { 48016c36bddfSEduardo Habkost { "spapr-pci-host-bridge", "dynamic-reconfiguration", "off" }, 4802fa386d98SMarc-André Lureau }; 4803fc9f38c3SDavid Gibson spapr_machine_2_4_class_options(mc); 48048995dd90SMarc-André Lureau compat_props_add(mc->compat_props, hw_compat_2_3, hw_compat_2_3_len); 480588cbe073SMarc-André Lureau compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); 48061c5f29bbSDavid Gibson } 4807fccbc785SDavid Gibson DEFINE_SPAPR_MACHINE(2_3, "2.3", false); 48081c5f29bbSDavid Gibson 48091c5f29bbSDavid Gibson /* 48101c5f29bbSDavid Gibson * pseries-2.2 48111c5f29bbSDavid Gibson */ 481288cbe073SMarc-André Lureau 481388cbe073SMarc-André Lureau static void spapr_machine_2_2_class_options(MachineClass *mc) 481488cbe073SMarc-André Lureau { 481588cbe073SMarc-André Lureau static GlobalProperty compat[] = { 48166c36bddfSEduardo Habkost { TYPE_SPAPR_PCI_HOST_BRIDGE, "mem_win_size", "0x20000000" }, 4817fa386d98SMarc-André Lureau }; 4818b194df47SAlexey Kardashevskiy 4819fc9f38c3SDavid Gibson spapr_machine_2_3_class_options(mc); 48201c30044eSMarc-André Lureau compat_props_add(mc->compat_props, hw_compat_2_2, hw_compat_2_2_len); 482188cbe073SMarc-André Lureau compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); 4822f6d0656bSEduardo Habkost mc->default_machine_opts = "modern-hotplug-events=off,suppress-vmdesc=on"; 48231c5f29bbSDavid Gibson } 4824fccbc785SDavid 
Gibson DEFINE_SPAPR_MACHINE(2_2, "2.2", false); 48251c5f29bbSDavid Gibson 48261c5f29bbSDavid Gibson /* 48271c5f29bbSDavid Gibson * pseries-2.1 48281c5f29bbSDavid Gibson */ 48291c5f29bbSDavid Gibson 48305013c547SDavid Gibson static void spapr_machine_2_1_class_options(MachineClass *mc) 4831b0e966d0SJason Wang { 4832fc9f38c3SDavid Gibson spapr_machine_2_2_class_options(mc); 4833c4fc5695SMarc-André Lureau compat_props_add(mc->compat_props, hw_compat_2_1, hw_compat_2_1_len); 48346026db45SAlexey Kardashevskiy } 4835fccbc785SDavid Gibson DEFINE_SPAPR_MACHINE(2_1, "2.1", false); 48366026db45SAlexey Kardashevskiy 483729ee3247SAlexey Kardashevskiy static void spapr_machine_register_types(void) 483829ee3247SAlexey Kardashevskiy { 483929ee3247SAlexey Kardashevskiy type_register_static(&spapr_machine_info); 484029ee3247SAlexey Kardashevskiy } 484129ee3247SAlexey Kardashevskiy 484229ee3247SAlexey Kardashevskiy type_init(spapr_machine_register_types) 4843