1 /* 2 * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator 3 * 4 * Copyright (c) 2004-2007 Fabrice Bellard 5 * Copyright (c) 2007 Jocelyn Mayer 6 * Copyright (c) 2010 David Gibson, IBM Corporation. 7 * 8 * Permission is hereby granted, free of charge, to any person obtaining a copy 9 * of this software and associated documentation files (the "Software"), to deal 10 * in the Software without restriction, including without limitation the rights 11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 12 * copies of the Software, and to permit persons to whom the Software is 13 * furnished to do so, subject to the following conditions: 14 * 15 * The above copyright notice and this permission notice shall be included in 16 * all copies or substantial portions of the Software. 17 * 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 24 * THE SOFTWARE. 25 * 26 */ 27 #include "sysemu/sysemu.h" 28 #include "hw/hw.h" 29 #include "hw/fw-path-provider.h" 30 #include "elf.h" 31 #include "net/net.h" 32 #include "sysemu/blockdev.h" 33 #include "sysemu/cpus.h" 34 #include "sysemu/kvm.h" 35 #include "kvm_ppc.h" 36 #include "mmu-hash64.h" 37 #include "qom/cpu.h" 38 39 #include "hw/boards.h" 40 #include "hw/ppc/ppc.h" 41 #include "hw/loader.h" 42 43 #include "hw/ppc/spapr.h" 44 #include "hw/ppc/spapr_vio.h" 45 #include "hw/pci-host/spapr.h" 46 #include "hw/ppc/xics.h" 47 #include "hw/pci/msi.h" 48 49 #include "hw/pci/pci.h" 50 #include "hw/scsi/scsi.h" 51 #include "hw/virtio/virtio-scsi.h" 52 53 #include "exec/address-spaces.h" 54 #include "hw/usb.h" 55 #include "qemu/config-file.h" 56 #include "qemu/error-report.h" 57 #include "trace.h" 58 59 #include <libfdt.h> 60 61 /* SLOF memory layout: 62 * 63 * SLOF raw image loaded at 0, copies its romfs right below the flat 64 * device-tree, then position SLOF itself 31M below that 65 * 66 * So we set FW_OVERHEAD to 40MB which should account for all of that 67 * and more 68 * 69 * We load our kernel at 4M, leaving space for SLOF initial image 70 */ 71 #define FDT_MAX_SIZE 0x40000 72 #define RTAS_MAX_SIZE 0x10000 73 #define FW_MAX_SIZE 0x400000 74 #define FW_FILE_NAME "slof.bin" 75 #define FW_OVERHEAD 0x2800000 76 #define KERNEL_LOAD_ADDR FW_MAX_SIZE 77 78 #define MIN_RMA_SLOF 128UL 79 80 #define TIMEBASE_FREQ 512000000ULL 81 82 #define MAX_CPUS 256 83 #define XICS_IRQS 1024 84 85 #define PHANDLE_XICP 0x00001111 86 87 #define HTAB_SIZE(spapr) (1ULL << ((spapr)->htab_shift)) 88 89 #define TYPE_SPAPR_MACHINE "spapr-machine" 90 91 sPAPREnvironment *spapr; 92 93 int spapr_allocate_irq(int hint, bool lsi) 94 { 95 int irq; 96 97 if (hint) { 98 irq = hint; 99 if (hint >= spapr->next_irq) { 100 spapr->next_irq = hint + 1; 101 } 102 /* FIXME: we should probably check for collisions somehow */ 103 } else { 104 irq = spapr->next_irq++; 105 } 106 107 /* Configure irq type */ 108 if (!xics_get_qirq(spapr->icp, irq)) { 109 return 0; 110 } 111 112 xics_set_irq_type(spapr->icp, irq, lsi); 113 114 return irq; 115 } 116 117 /* 118 * Allocate block of consequtive IRQs, returns a number of the first. 119 * If msi==true, aligns the first IRQ number to num. 120 */ 121 int spapr_allocate_irq_block(int num, bool lsi, bool msi) 122 { 123 int first = -1; 124 int i, hint = 0; 125 126 /* 127 * MSIMesage::data is used for storing VIRQ so 128 * it has to be aligned to num to support multiple 129 * MSI vectors. MSI-X is not affected by this. 130 * The hint is used for the first IRQ, the rest should 131 * be allocated continuously. 132 */ 133 if (msi) { 134 assert((num == 1) || (num == 2) || (num == 4) || 135 (num == 8) || (num == 16) || (num == 32)); 136 hint = (spapr->next_irq + num - 1) & ~(num - 1); 137 } 138 139 for (i = 0; i < num; ++i) { 140 int irq; 141 142 irq = spapr_allocate_irq(hint, lsi); 143 if (!irq) { 144 return -1; 145 } 146 147 if (0 == i) { 148 first = irq; 149 hint = 0; 150 } 151 152 /* If the above doesn't create a consecutive block then that's 153 * an internal bug */ 154 assert(irq == (first + i)); 155 } 156 157 return first; 158 } 159 160 static XICSState *try_create_xics(const char *type, int nr_servers, 161 int nr_irqs) 162 { 163 DeviceState *dev; 164 165 dev = qdev_create(NULL, type); 166 qdev_prop_set_uint32(dev, "nr_servers", nr_servers); 167 qdev_prop_set_uint32(dev, "nr_irqs", nr_irqs); 168 if (qdev_init(dev) < 0) { 169 return NULL; 170 } 171 172 return XICS_COMMON(dev); 173 } 174 175 static XICSState *xics_system_init(int nr_servers, int nr_irqs) 176 { 177 XICSState *icp = NULL; 178 179 if (kvm_enabled()) { 180 QemuOpts *machine_opts = qemu_get_machine_opts(); 181 bool irqchip_allowed = qemu_opt_get_bool(machine_opts, 182 "kernel_irqchip", true); 183 bool irqchip_required = qemu_opt_get_bool(machine_opts, 184 "kernel_irqchip", false); 185 if (irqchip_allowed) { 186 icp = try_create_xics(TYPE_KVM_XICS, nr_servers, nr_irqs); 187 } 188 189 if (irqchip_required && !icp) { 190 perror("Failed to create in-kernel XICS\n"); 191 abort(); 192 } 193 } 194 195 if (!icp) { 196 icp = try_create_xics(TYPE_XICS, nr_servers, nr_irqs); 197 } 198 199 if (!icp) { 200 perror("Failed to create XICS\n"); 201 abort(); 202 } 203 204 return icp; 205 } 206 207 static int spapr_fixup_cpu_smt_dt(void *fdt, int offset, PowerPCCPU *cpu, 208 int smt_threads) 209 { 210 int i, ret = 0; 211 uint32_t servers_prop[smt_threads]; 212 uint32_t gservers_prop[smt_threads * 2]; 213 int index = ppc_get_vcpu_dt_id(cpu); 214 215 if (cpu->cpu_version) { 216 ret = fdt_setprop(fdt, offset, "cpu-version", 217 &cpu->cpu_version, sizeof(cpu->cpu_version)); 218 if (ret < 0) { 219 return ret; 220 } 221 } 222 223 /* Build interrupt servers and gservers properties */ 224 for (i = 0; i < smt_threads; i++) { 225 servers_prop[i] = cpu_to_be32(index + i); 226 /* Hack, direct the group queues back to cpu 0 */ 227 gservers_prop[i*2] = cpu_to_be32(index + i); 228 gservers_prop[i*2 + 1] = 0; 229 } 230 ret = fdt_setprop(fdt, offset, "ibm,ppc-interrupt-server#s", 231 servers_prop, sizeof(servers_prop)); 232 if (ret < 0) { 233 return ret; 234 } 235 ret = fdt_setprop(fdt, offset, "ibm,ppc-interrupt-gserver#s", 236 gservers_prop, sizeof(gservers_prop)); 237 238 return ret; 239 } 240 241 static int spapr_fixup_cpu_dt(void *fdt, sPAPREnvironment *spapr) 242 { 243 int ret = 0, offset, cpus_offset; 244 CPUState *cs; 245 char cpu_model[32]; 246 int smt = kvmppc_smt_threads(); 247 uint32_t pft_size_prop[] = {0, cpu_to_be32(spapr->htab_shift)}; 248 249 CPU_FOREACH(cs) { 250 PowerPCCPU *cpu = POWERPC_CPU(cs); 251 DeviceClass *dc = DEVICE_GET_CLASS(cs); 252 int index = ppc_get_vcpu_dt_id(cpu); 253 uint32_t associativity[] = {cpu_to_be32(0x5), 254 cpu_to_be32(0x0), 255 cpu_to_be32(0x0), 256 cpu_to_be32(0x0), 257 cpu_to_be32(cs->numa_node), 258 cpu_to_be32(index)}; 259 260 if ((index % smt) != 0) { 261 continue; 262 } 263 264 snprintf(cpu_model, 32, "%s@%x", dc->fw_name, index); 265 266 cpus_offset = fdt_path_offset(fdt, "/cpus"); 267 if (cpus_offset < 0) { 268 cpus_offset = fdt_add_subnode(fdt, fdt_path_offset(fdt, "/"), 269 "cpus"); 270 if (cpus_offset < 0) { 271 return cpus_offset; 272 } 273 } 274 offset = fdt_subnode_offset(fdt, cpus_offset, cpu_model); 275 if (offset < 0) { 276 offset = fdt_add_subnode(fdt, cpus_offset, cpu_model); 277 if (offset < 0) { 278 return offset; 279 } 280 } 281 282 if (nb_numa_nodes > 1) { 283 ret = fdt_setprop(fdt, offset, "ibm,associativity", associativity, 284 sizeof(associativity)); 285 if (ret < 0) { 286 return ret; 287 } 288 } 289 290 ret = fdt_setprop(fdt, offset, "ibm,pft-size", 291 pft_size_prop, sizeof(pft_size_prop)); 292 if (ret < 0) { 293 return ret; 294 } 295 296 ret = spapr_fixup_cpu_smt_dt(fdt, offset, cpu, 297 ppc_get_compat_smt_threads(cpu)); 298 if (ret < 0) { 299 return ret; 300 } 301 } 302 return ret; 303 } 304 305 306 static size_t create_page_sizes_prop(CPUPPCState *env, uint32_t *prop, 307 size_t maxsize) 308 { 309 size_t maxcells = maxsize / sizeof(uint32_t); 310 int i, j, count; 311 uint32_t *p = prop; 312 313 for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) { 314 struct ppc_one_seg_page_size *sps = &env->sps.sps[i]; 315 316 if (!sps->page_shift) { 317 break; 318 } 319 for (count = 0; count < PPC_PAGE_SIZES_MAX_SZ; count++) { 320 if (sps->enc[count].page_shift == 0) { 321 break; 322 } 323 } 324 if ((p - prop) >= (maxcells - 3 - count * 2)) { 325 break; 326 } 327 *(p++) = cpu_to_be32(sps->page_shift); 328 *(p++) = cpu_to_be32(sps->slb_enc); 329 *(p++) = cpu_to_be32(count); 330 for (j = 0; j < count; j++) { 331 *(p++) = cpu_to_be32(sps->enc[j].page_shift); 332 *(p++) = cpu_to_be32(sps->enc[j].pte_enc); 333 } 334 } 335 336 return (p - prop) * sizeof(uint32_t); 337 } 338 339 #define _FDT(exp) \ 340 do { \ 341 int ret = (exp); \ 342 if (ret < 0) { \ 343 fprintf(stderr, "qemu: error creating device tree: %s: %s\n", \ 344 #exp, fdt_strerror(ret)); \ 345 exit(1); \ 346 } \ 347 } while (0) 348 349 350 static void *spapr_create_fdt_skel(hwaddr initrd_base, 351 hwaddr initrd_size, 352 hwaddr kernel_size, 353 bool little_endian, 354 const char *boot_device, 355 const char *kernel_cmdline, 356 uint32_t epow_irq) 357 { 358 void *fdt; 359 CPUState *cs; 360 uint32_t start_prop = cpu_to_be32(initrd_base); 361 uint32_t end_prop = cpu_to_be32(initrd_base + initrd_size); 362 char hypertas_prop[] = "hcall-pft\0hcall-term\0hcall-dabr\0hcall-interrupt" 363 "\0hcall-tce\0hcall-vio\0hcall-splpar\0hcall-bulk\0hcall-set-mode"; 364 char qemu_hypertas_prop[] = "hcall-memop1"; 365 uint32_t refpoints[] = {cpu_to_be32(0x4), cpu_to_be32(0x4)}; 366 uint32_t interrupt_server_ranges_prop[] = {0, cpu_to_be32(smp_cpus)}; 367 int smt = kvmppc_smt_threads(); 368 unsigned char vec5[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x80}; 369 QemuOpts *opts = qemu_opts_find(qemu_find_opts("smp-opts"), NULL); 370 unsigned sockets = opts ? qemu_opt_get_number(opts, "sockets", 0) : 0; 371 uint32_t cpus_per_socket = sockets ? (smp_cpus / sockets) : 1; 372 373 fdt = g_malloc0(FDT_MAX_SIZE); 374 _FDT((fdt_create(fdt, FDT_MAX_SIZE))); 375 376 if (kernel_size) { 377 _FDT((fdt_add_reservemap_entry(fdt, KERNEL_LOAD_ADDR, kernel_size))); 378 } 379 if (initrd_size) { 380 _FDT((fdt_add_reservemap_entry(fdt, initrd_base, initrd_size))); 381 } 382 _FDT((fdt_finish_reservemap(fdt))); 383 384 /* Root node */ 385 _FDT((fdt_begin_node(fdt, ""))); 386 _FDT((fdt_property_string(fdt, "device_type", "chrp"))); 387 _FDT((fdt_property_string(fdt, "model", "IBM pSeries (emulated by qemu)"))); 388 _FDT((fdt_property_string(fdt, "compatible", "qemu,pseries"))); 389 390 _FDT((fdt_property_cell(fdt, "#address-cells", 0x2))); 391 _FDT((fdt_property_cell(fdt, "#size-cells", 0x2))); 392 393 /* /chosen */ 394 _FDT((fdt_begin_node(fdt, "chosen"))); 395 396 /* Set Form1_affinity */ 397 _FDT((fdt_property(fdt, "ibm,architecture-vec-5", vec5, sizeof(vec5)))); 398 399 _FDT((fdt_property_string(fdt, "bootargs", kernel_cmdline))); 400 _FDT((fdt_property(fdt, "linux,initrd-start", 401 &start_prop, sizeof(start_prop)))); 402 _FDT((fdt_property(fdt, "linux,initrd-end", 403 &end_prop, sizeof(end_prop)))); 404 if (kernel_size) { 405 uint64_t kprop[2] = { cpu_to_be64(KERNEL_LOAD_ADDR), 406 cpu_to_be64(kernel_size) }; 407 408 _FDT((fdt_property(fdt, "qemu,boot-kernel", &kprop, sizeof(kprop)))); 409 if (little_endian) { 410 _FDT((fdt_property(fdt, "qemu,boot-kernel-le", NULL, 0))); 411 } 412 } 413 if (boot_device) { 414 _FDT((fdt_property_string(fdt, "qemu,boot-device", boot_device))); 415 } 416 _FDT((fdt_property_cell(fdt, "qemu,graphic-width", graphic_width))); 417 _FDT((fdt_property_cell(fdt, "qemu,graphic-height", graphic_height))); 418 _FDT((fdt_property_cell(fdt, "qemu,graphic-depth", graphic_depth))); 419 420 _FDT((fdt_end_node(fdt))); 421 422 /* cpus */ 423 _FDT((fdt_begin_node(fdt, "cpus"))); 424 425 _FDT((fdt_property_cell(fdt, "#address-cells", 0x1))); 426 _FDT((fdt_property_cell(fdt, "#size-cells", 0x0))); 427 428 CPU_FOREACH(cs) { 429 PowerPCCPU *cpu = POWERPC_CPU(cs); 430 CPUPPCState *env = &cpu->env; 431 DeviceClass *dc = DEVICE_GET_CLASS(cs); 432 PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs); 433 int index = ppc_get_vcpu_dt_id(cpu); 434 char *nodename; 435 uint32_t segs[] = {cpu_to_be32(28), cpu_to_be32(40), 436 0xffffffff, 0xffffffff}; 437 uint32_t tbfreq = kvm_enabled() ? kvmppc_get_tbfreq() : TIMEBASE_FREQ; 438 uint32_t cpufreq = kvm_enabled() ? kvmppc_get_clockfreq() : 1000000000; 439 uint32_t page_sizes_prop[64]; 440 size_t page_sizes_prop_size; 441 442 if ((index % smt) != 0) { 443 continue; 444 } 445 446 nodename = g_strdup_printf("%s@%x", dc->fw_name, index); 447 448 _FDT((fdt_begin_node(fdt, nodename))); 449 450 g_free(nodename); 451 452 _FDT((fdt_property_cell(fdt, "reg", index))); 453 _FDT((fdt_property_string(fdt, "device_type", "cpu"))); 454 455 _FDT((fdt_property_cell(fdt, "cpu-version", env->spr[SPR_PVR]))); 456 _FDT((fdt_property_cell(fdt, "d-cache-block-size", 457 env->dcache_line_size))); 458 _FDT((fdt_property_cell(fdt, "d-cache-line-size", 459 env->dcache_line_size))); 460 _FDT((fdt_property_cell(fdt, "i-cache-block-size", 461 env->icache_line_size))); 462 _FDT((fdt_property_cell(fdt, "i-cache-line-size", 463 env->icache_line_size))); 464 465 if (pcc->l1_dcache_size) { 466 _FDT((fdt_property_cell(fdt, "d-cache-size", pcc->l1_dcache_size))); 467 } else { 468 fprintf(stderr, "Warning: Unknown L1 dcache size for cpu\n"); 469 } 470 if (pcc->l1_icache_size) { 471 _FDT((fdt_property_cell(fdt, "i-cache-size", pcc->l1_icache_size))); 472 } else { 473 fprintf(stderr, "Warning: Unknown L1 icache size for cpu\n"); 474 } 475 476 _FDT((fdt_property_cell(fdt, "timebase-frequency", tbfreq))); 477 _FDT((fdt_property_cell(fdt, "clock-frequency", cpufreq))); 478 _FDT((fdt_property_cell(fdt, "ibm,slb-size", env->slb_nr))); 479 _FDT((fdt_property_string(fdt, "status", "okay"))); 480 _FDT((fdt_property(fdt, "64-bit", NULL, 0))); 481 482 if (env->spr_cb[SPR_PURR].oea_read) { 483 _FDT((fdt_property(fdt, "ibm,purr", NULL, 0))); 484 } 485 486 if (env->mmu_model & POWERPC_MMU_1TSEG) { 487 _FDT((fdt_property(fdt, "ibm,processor-segment-sizes", 488 segs, sizeof(segs)))); 489 } 490 491 /* Advertise VMX/VSX (vector extensions) if available 492 * 0 / no property == no vector extensions 493 * 1 == VMX / Altivec available 494 * 2 == VSX available */ 495 if (env->insns_flags & PPC_ALTIVEC) { 496 uint32_t vmx = (env->insns_flags2 & PPC2_VSX) ? 2 : 1; 497 498 _FDT((fdt_property_cell(fdt, "ibm,vmx", vmx))); 499 } 500 501 /* Advertise DFP (Decimal Floating Point) if available 502 * 0 / no property == no DFP 503 * 1 == DFP available */ 504 if (env->insns_flags2 & PPC2_DFP) { 505 _FDT((fdt_property_cell(fdt, "ibm,dfp", 1))); 506 } 507 508 page_sizes_prop_size = create_page_sizes_prop(env, page_sizes_prop, 509 sizeof(page_sizes_prop)); 510 if (page_sizes_prop_size) { 511 _FDT((fdt_property(fdt, "ibm,segment-page-sizes", 512 page_sizes_prop, page_sizes_prop_size))); 513 } 514 515 _FDT((fdt_property_cell(fdt, "ibm,chip-id", 516 cs->cpu_index / cpus_per_socket))); 517 518 _FDT((fdt_end_node(fdt))); 519 } 520 521 _FDT((fdt_end_node(fdt))); 522 523 /* RTAS */ 524 _FDT((fdt_begin_node(fdt, "rtas"))); 525 526 _FDT((fdt_property(fdt, "ibm,hypertas-functions", hypertas_prop, 527 sizeof(hypertas_prop)))); 528 _FDT((fdt_property(fdt, "qemu,hypertas-functions", qemu_hypertas_prop, 529 sizeof(qemu_hypertas_prop)))); 530 531 _FDT((fdt_property(fdt, "ibm,associativity-reference-points", 532 refpoints, sizeof(refpoints)))); 533 534 _FDT((fdt_property_cell(fdt, "rtas-error-log-max", RTAS_ERROR_LOG_MAX))); 535 536 _FDT((fdt_end_node(fdt))); 537 538 /* interrupt controller */ 539 _FDT((fdt_begin_node(fdt, "interrupt-controller"))); 540 541 _FDT((fdt_property_string(fdt, "device_type", 542 "PowerPC-External-Interrupt-Presentation"))); 543 _FDT((fdt_property_string(fdt, "compatible", "IBM,ppc-xicp"))); 544 _FDT((fdt_property(fdt, "interrupt-controller", NULL, 0))); 545 _FDT((fdt_property(fdt, "ibm,interrupt-server-ranges", 546 interrupt_server_ranges_prop, 547 sizeof(interrupt_server_ranges_prop)))); 548 _FDT((fdt_property_cell(fdt, "#interrupt-cells", 2))); 549 _FDT((fdt_property_cell(fdt, "linux,phandle", PHANDLE_XICP))); 550 _FDT((fdt_property_cell(fdt, "phandle", PHANDLE_XICP))); 551 552 _FDT((fdt_end_node(fdt))); 553 554 /* vdevice */ 555 _FDT((fdt_begin_node(fdt, "vdevice"))); 556 557 _FDT((fdt_property_string(fdt, "device_type", "vdevice"))); 558 _FDT((fdt_property_string(fdt, "compatible", "IBM,vdevice"))); 559 _FDT((fdt_property_cell(fdt, "#address-cells", 0x1))); 560 _FDT((fdt_property_cell(fdt, "#size-cells", 0x0))); 561 _FDT((fdt_property_cell(fdt, "#interrupt-cells", 0x2))); 562 _FDT((fdt_property(fdt, "interrupt-controller", NULL, 0))); 563 564 _FDT((fdt_end_node(fdt))); 565 566 /* event-sources */ 567 spapr_events_fdt_skel(fdt, epow_irq); 568 569 _FDT((fdt_end_node(fdt))); /* close root node */ 570 _FDT((fdt_finish(fdt))); 571 572 return fdt; 573 } 574 575 int spapr_h_cas_compose_response(target_ulong addr, target_ulong size) 576 { 577 void *fdt, *fdt_skel; 578 sPAPRDeviceTreeUpdateHeader hdr = { .version_id = 1 }; 579 580 size -= sizeof(hdr); 581 582 /* Create sceleton */ 583 fdt_skel = g_malloc0(size); 584 _FDT((fdt_create(fdt_skel, size))); 585 _FDT((fdt_begin_node(fdt_skel, ""))); 586 _FDT((fdt_end_node(fdt_skel))); 587 _FDT((fdt_finish(fdt_skel))); 588 fdt = g_malloc0(size); 589 _FDT((fdt_open_into(fdt_skel, fdt, size))); 590 g_free(fdt_skel); 591 592 /* Fix skeleton up */ 593 _FDT((spapr_fixup_cpu_dt(fdt, spapr))); 594 595 /* Pack resulting tree */ 596 _FDT((fdt_pack(fdt))); 597 598 if (fdt_totalsize(fdt) + sizeof(hdr) > size) { 599 trace_spapr_cas_failed(size); 600 return -1; 601 } 602 603 cpu_physical_memory_write(addr, &hdr, sizeof(hdr)); 604 cpu_physical_memory_write(addr + sizeof(hdr), fdt, fdt_totalsize(fdt)); 605 trace_spapr_cas_continue(fdt_totalsize(fdt) + sizeof(hdr)); 606 g_free(fdt); 607 608 return 0; 609 } 610 611 static int spapr_populate_memory(sPAPREnvironment *spapr, void *fdt) 612 { 613 uint32_t associativity[] = {cpu_to_be32(0x4), cpu_to_be32(0x0), 614 cpu_to_be32(0x0), cpu_to_be32(0x0), 615 cpu_to_be32(0x0)}; 616 char mem_name[32]; 617 hwaddr node0_size, mem_start, node_size; 618 uint64_t mem_reg_property[2]; 619 int i, off; 620 621 /* memory node(s) */ 622 if (nb_numa_nodes > 1 && node_mem[0] < ram_size) { 623 node0_size = node_mem[0]; 624 } else { 625 node0_size = ram_size; 626 } 627 628 /* RMA */ 629 mem_reg_property[0] = 0; 630 mem_reg_property[1] = cpu_to_be64(spapr->rma_size); 631 off = fdt_add_subnode(fdt, 0, "memory@0"); 632 _FDT(off); 633 _FDT((fdt_setprop_string(fdt, off, "device_type", "memory"))); 634 _FDT((fdt_setprop(fdt, off, "reg", mem_reg_property, 635 sizeof(mem_reg_property)))); 636 _FDT((fdt_setprop(fdt, off, "ibm,associativity", associativity, 637 sizeof(associativity)))); 638 639 /* RAM: Node 0 */ 640 if (node0_size > spapr->rma_size) { 641 mem_reg_property[0] = cpu_to_be64(spapr->rma_size); 642 mem_reg_property[1] = cpu_to_be64(node0_size - spapr->rma_size); 643 644 sprintf(mem_name, "memory@" TARGET_FMT_lx, spapr->rma_size); 645 off = fdt_add_subnode(fdt, 0, mem_name); 646 _FDT(off); 647 _FDT((fdt_setprop_string(fdt, off, "device_type", "memory"))); 648 _FDT((fdt_setprop(fdt, off, "reg", mem_reg_property, 649 sizeof(mem_reg_property)))); 650 _FDT((fdt_setprop(fdt, off, "ibm,associativity", associativity, 651 sizeof(associativity)))); 652 } 653 654 /* RAM: Node 1 and beyond */ 655 mem_start = node0_size; 656 for (i = 1; i < nb_numa_nodes; i++) { 657 mem_reg_property[0] = cpu_to_be64(mem_start); 658 if (mem_start >= ram_size) { 659 node_size = 0; 660 } else { 661 node_size = node_mem[i]; 662 if (node_size > ram_size - mem_start) { 663 node_size = ram_size - mem_start; 664 } 665 } 666 mem_reg_property[1] = cpu_to_be64(node_size); 667 associativity[3] = associativity[4] = cpu_to_be32(i); 668 sprintf(mem_name, "memory@" TARGET_FMT_lx, mem_start); 669 off = fdt_add_subnode(fdt, 0, mem_name); 670 _FDT(off); 671 _FDT((fdt_setprop_string(fdt, off, "device_type", "memory"))); 672 _FDT((fdt_setprop(fdt, off, "reg", mem_reg_property, 673 sizeof(mem_reg_property)))); 674 _FDT((fdt_setprop(fdt, off, "ibm,associativity", associativity, 675 sizeof(associativity)))); 676 mem_start += node_size; 677 } 678 679 return 0; 680 } 681 682 static void spapr_finalize_fdt(sPAPREnvironment *spapr, 683 hwaddr fdt_addr, 684 hwaddr rtas_addr, 685 hwaddr rtas_size) 686 { 687 int ret, i; 688 size_t cb = 0; 689 char *bootlist; 690 void *fdt; 691 sPAPRPHBState *phb; 692 693 fdt = g_malloc(FDT_MAX_SIZE); 694 695 /* open out the base tree into a temp buffer for the final tweaks */ 696 _FDT((fdt_open_into(spapr->fdt_skel, fdt, FDT_MAX_SIZE))); 697 698 ret = spapr_populate_memory(spapr, fdt); 699 if (ret < 0) { 700 fprintf(stderr, "couldn't setup memory nodes in fdt\n"); 701 exit(1); 702 } 703 704 ret = spapr_populate_vdevice(spapr->vio_bus, fdt); 705 if (ret < 0) { 706 fprintf(stderr, "couldn't setup vio devices in fdt\n"); 707 exit(1); 708 } 709 710 QLIST_FOREACH(phb, &spapr->phbs, list) { 711 ret = spapr_populate_pci_dt(phb, PHANDLE_XICP, fdt); 712 } 713 714 if (ret < 0) { 715 fprintf(stderr, "couldn't setup PCI devices in fdt\n"); 716 exit(1); 717 } 718 719 /* RTAS */ 720 ret = spapr_rtas_device_tree_setup(fdt, rtas_addr, rtas_size); 721 if (ret < 0) { 722 fprintf(stderr, "Couldn't set up RTAS device tree properties\n"); 723 } 724 725 /* Advertise NUMA via ibm,associativity */ 726 ret = spapr_fixup_cpu_dt(fdt, spapr); 727 if (ret < 0) { 728 fprintf(stderr, "Couldn't finalize CPU device tree properties\n"); 729 } 730 731 bootlist = get_boot_devices_list(&cb, true); 732 if (cb && bootlist) { 733 int offset = fdt_path_offset(fdt, "/chosen"); 734 if (offset < 0) { 735 exit(1); 736 } 737 for (i = 0; i < cb; i++) { 738 if (bootlist[i] == '\n') { 739 bootlist[i] = ' '; 740 } 741 742 } 743 ret = fdt_setprop_string(fdt, offset, "qemu,boot-list", bootlist); 744 } 745 746 if (!spapr->has_graphics) { 747 spapr_populate_chosen_stdout(fdt, spapr->vio_bus); 748 } 749 750 _FDT((fdt_pack(fdt))); 751 752 if (fdt_totalsize(fdt) > FDT_MAX_SIZE) { 753 hw_error("FDT too big ! 0x%x bytes (max is 0x%x)\n", 754 fdt_totalsize(fdt), FDT_MAX_SIZE); 755 exit(1); 756 } 757 758 cpu_physical_memory_write(fdt_addr, fdt, fdt_totalsize(fdt)); 759 760 g_free(fdt); 761 } 762 763 static uint64_t translate_kernel_address(void *opaque, uint64_t addr) 764 { 765 return (addr & 0x0fffffff) + KERNEL_LOAD_ADDR; 766 } 767 768 static void emulate_spapr_hypercall(PowerPCCPU *cpu) 769 { 770 CPUPPCState *env = &cpu->env; 771 772 if (msr_pr) { 773 hcall_dprintf("Hypercall made with MSR[PR]=1\n"); 774 env->gpr[3] = H_PRIVILEGE; 775 } else { 776 env->gpr[3] = spapr_hypercall(cpu, env->gpr[3], &env->gpr[4]); 777 } 778 } 779 780 static void spapr_reset_htab(sPAPREnvironment *spapr) 781 { 782 long shift; 783 784 /* allocate hash page table. For now we always make this 16mb, 785 * later we should probably make it scale to the size of guest 786 * RAM */ 787 788 shift = kvmppc_reset_htab(spapr->htab_shift); 789 790 if (shift > 0) { 791 /* Kernel handles htab, we don't need to allocate one */ 792 spapr->htab_shift = shift; 793 kvmppc_kern_htab = true; 794 } else { 795 if (!spapr->htab) { 796 /* Allocate an htab if we don't yet have one */ 797 spapr->htab = qemu_memalign(HTAB_SIZE(spapr), HTAB_SIZE(spapr)); 798 } 799 800 /* And clear it */ 801 memset(spapr->htab, 0, HTAB_SIZE(spapr)); 802 } 803 804 /* Update the RMA size if necessary */ 805 if (spapr->vrma_adjust) { 806 hwaddr node0_size = (nb_numa_nodes > 1) ? node_mem[0] : ram_size; 807 spapr->rma_size = kvmppc_rma_size(node0_size, spapr->htab_shift); 808 } 809 } 810 811 static void ppc_spapr_reset(void) 812 { 813 PowerPCCPU *first_ppc_cpu; 814 815 /* Reset the hash table & recalc the RMA */ 816 spapr_reset_htab(spapr); 817 818 qemu_devices_reset(); 819 820 /* Load the fdt */ 821 spapr_finalize_fdt(spapr, spapr->fdt_addr, spapr->rtas_addr, 822 spapr->rtas_size); 823 824 /* Set up the entry state */ 825 first_ppc_cpu = POWERPC_CPU(first_cpu); 826 first_ppc_cpu->env.gpr[3] = spapr->fdt_addr; 827 first_ppc_cpu->env.gpr[5] = 0; 828 first_cpu->halted = 0; 829 first_ppc_cpu->env.nip = spapr->entry_point; 830 831 } 832 833 static void spapr_cpu_reset(void *opaque) 834 { 835 PowerPCCPU *cpu = opaque; 836 CPUState *cs = CPU(cpu); 837 CPUPPCState *env = &cpu->env; 838 839 cpu_reset(cs); 840 841 /* All CPUs start halted. CPU0 is unhalted from the machine level 842 * reset code and the rest are explicitly started up by the guest 843 * using an RTAS call */ 844 cs->halted = 1; 845 846 env->spr[SPR_HIOR] = 0; 847 848 env->external_htab = (uint8_t *)spapr->htab; 849 if (kvm_enabled() && !env->external_htab) { 850 /* 851 * HV KVM, set external_htab to 1 so our ppc_hash64_load_hpte* 852 * functions do the right thing. 853 */ 854 env->external_htab = (void *)1; 855 } 856 env->htab_base = -1; 857 /* 858 * htab_mask is the mask used to normalize hash value to PTEG index. 859 * htab_shift is log2 of hash table size. 860 * We have 8 hpte per group, and each hpte is 16 bytes. 861 * ie have 128 bytes per hpte entry. 862 */ 863 env->htab_mask = (1ULL << ((spapr)->htab_shift - 7)) - 1; 864 env->spr[SPR_SDR1] = (target_ulong)(uintptr_t)spapr->htab | 865 (spapr->htab_shift - 18); 866 } 867 868 static void spapr_create_nvram(sPAPREnvironment *spapr) 869 { 870 DeviceState *dev = qdev_create(&spapr->vio_bus->bus, "spapr-nvram"); 871 DriveInfo *dinfo = drive_get(IF_PFLASH, 0, 0); 872 873 if (dinfo) { 874 qdev_prop_set_drive_nofail(dev, "drive", dinfo->bdrv); 875 } 876 877 qdev_init_nofail(dev); 878 879 spapr->nvram = (struct sPAPRNVRAM *)dev; 880 } 881 882 /* Returns whether we want to use VGA or not */ 883 static int spapr_vga_init(PCIBus *pci_bus) 884 { 885 switch (vga_interface_type) { 886 case VGA_NONE: 887 return false; 888 case VGA_DEVICE: 889 return true; 890 case VGA_STD: 891 return pci_vga_init(pci_bus) != NULL; 892 default: 893 fprintf(stderr, "This vga model is not supported," 894 "currently it only supports -vga std\n"); 895 exit(0); 896 } 897 } 898 899 static const VMStateDescription vmstate_spapr = { 900 .name = "spapr", 901 .version_id = 2, 902 .minimum_version_id = 1, 903 .fields = (VMStateField[]) { 904 VMSTATE_UINT32(next_irq, sPAPREnvironment), 905 906 /* RTC offset */ 907 VMSTATE_UINT64(rtc_offset, sPAPREnvironment), 908 VMSTATE_PPC_TIMEBASE_V(tb, sPAPREnvironment, 2), 909 VMSTATE_END_OF_LIST() 910 }, 911 }; 912 913 #define HPTE(_table, _i) (void *)(((uint64_t *)(_table)) + ((_i) * 2)) 914 #define HPTE_VALID(_hpte) (tswap64(*((uint64_t *)(_hpte))) & HPTE64_V_VALID) 915 #define HPTE_DIRTY(_hpte) (tswap64(*((uint64_t *)(_hpte))) & HPTE64_V_HPTE_DIRTY) 916 #define CLEAN_HPTE(_hpte) ((*(uint64_t *)(_hpte)) &= tswap64(~HPTE64_V_HPTE_DIRTY)) 917 918 static int htab_save_setup(QEMUFile *f, void *opaque) 919 { 920 sPAPREnvironment *spapr = opaque; 921 922 /* "Iteration" header */ 923 qemu_put_be32(f, spapr->htab_shift); 924 925 if (spapr->htab) { 926 spapr->htab_save_index = 0; 927 spapr->htab_first_pass = true; 928 } else { 929 assert(kvm_enabled()); 930 931 spapr->htab_fd = kvmppc_get_htab_fd(false); 932 if (spapr->htab_fd < 0) { 933 fprintf(stderr, "Unable to open fd for reading hash table from KVM: %s\n", 934 strerror(errno)); 935 return -1; 936 } 937 } 938 939 940 return 0; 941 } 942 943 static void htab_save_first_pass(QEMUFile *f, sPAPREnvironment *spapr, 944 int64_t max_ns) 945 { 946 int htabslots = HTAB_SIZE(spapr) / HASH_PTE_SIZE_64; 947 int index = spapr->htab_save_index; 948 int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); 949 950 assert(spapr->htab_first_pass); 951 952 do { 953 int chunkstart; 954 955 /* Consume invalid HPTEs */ 956 while ((index < htabslots) 957 && !HPTE_VALID(HPTE(spapr->htab, index))) { 958 index++; 959 CLEAN_HPTE(HPTE(spapr->htab, index)); 960 } 961 962 /* Consume valid HPTEs */ 963 chunkstart = index; 964 while ((index < htabslots) 965 && HPTE_VALID(HPTE(spapr->htab, index))) { 966 index++; 967 CLEAN_HPTE(HPTE(spapr->htab, index)); 968 } 969 970 if (index > chunkstart) { 971 int n_valid = index - chunkstart; 972 973 qemu_put_be32(f, chunkstart); 974 qemu_put_be16(f, n_valid); 975 qemu_put_be16(f, 0); 976 qemu_put_buffer(f, HPTE(spapr->htab, chunkstart), 977 HASH_PTE_SIZE_64 * n_valid); 978 979 if ((qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) > max_ns) { 980 break; 981 } 982 } 983 } while ((index < htabslots) && !qemu_file_rate_limit(f)); 984 985 if (index >= htabslots) { 986 assert(index == htabslots); 987 index = 0; 988 spapr->htab_first_pass = false; 989 } 990 spapr->htab_save_index = index; 991 } 992 993 static int htab_save_later_pass(QEMUFile *f, sPAPREnvironment *spapr, 994 int64_t max_ns) 995 { 996 bool final = max_ns < 0; 997 int htabslots = HTAB_SIZE(spapr) / HASH_PTE_SIZE_64; 998 int examined = 0, sent = 0; 999 int index = spapr->htab_save_index; 1000 int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); 1001 1002 assert(!spapr->htab_first_pass); 1003 1004 do { 1005 int chunkstart, invalidstart; 1006 1007 /* Consume non-dirty HPTEs */ 1008 while ((index < htabslots) 1009 && !HPTE_DIRTY(HPTE(spapr->htab, index))) { 1010 index++; 1011 examined++; 1012 } 1013 1014 chunkstart = index; 1015 /* Consume valid dirty HPTEs */ 1016 while ((index < htabslots) 1017 && HPTE_DIRTY(HPTE(spapr->htab, index)) 1018 && HPTE_VALID(HPTE(spapr->htab, index))) { 1019 CLEAN_HPTE(HPTE(spapr->htab, index)); 1020 index++; 1021 examined++; 1022 } 1023 1024 invalidstart = index; 1025 /* Consume invalid dirty HPTEs */ 1026 while ((index < htabslots) 1027 && HPTE_DIRTY(HPTE(spapr->htab, index)) 1028 && !HPTE_VALID(HPTE(spapr->htab, index))) { 1029 CLEAN_HPTE(HPTE(spapr->htab, index)); 1030 index++; 1031 examined++; 1032 } 1033 1034 if (index > chunkstart) { 1035 int n_valid = invalidstart - chunkstart; 1036 int n_invalid = index - invalidstart; 1037 1038 qemu_put_be32(f, chunkstart); 1039 qemu_put_be16(f, n_valid); 1040 qemu_put_be16(f, n_invalid); 1041 qemu_put_buffer(f, HPTE(spapr->htab, chunkstart), 1042 HASH_PTE_SIZE_64 * n_valid); 1043 sent += index - chunkstart; 1044 1045 if (!final && (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) > max_ns) { 1046 break; 1047 } 1048 } 1049 1050 if (examined >= htabslots) { 1051 break; 1052 } 1053 1054 if (index >= htabslots) { 1055 assert(index == htabslots); 1056 index = 0; 1057 } 1058 } while ((examined < htabslots) && (!qemu_file_rate_limit(f) || final)); 1059 1060 if (index >= htabslots) { 1061 assert(index == htabslots); 1062 index = 0; 1063 } 1064 1065 spapr->htab_save_index = index; 1066 1067 return (examined >= htabslots) && (sent == 0) ? 1 : 0; 1068 } 1069 1070 #define MAX_ITERATION_NS 5000000 /* 5 ms */ 1071 #define MAX_KVM_BUF_SIZE 2048 1072 1073 static int htab_save_iterate(QEMUFile *f, void *opaque) 1074 { 1075 sPAPREnvironment *spapr = opaque; 1076 int rc = 0; 1077 1078 /* Iteration header */ 1079 qemu_put_be32(f, 0); 1080 1081 if (!spapr->htab) { 1082 assert(kvm_enabled()); 1083 1084 rc = kvmppc_save_htab(f, spapr->htab_fd, 1085 MAX_KVM_BUF_SIZE, MAX_ITERATION_NS); 1086 if (rc < 0) { 1087 return rc; 1088 } 1089 } else if (spapr->htab_first_pass) { 1090 htab_save_first_pass(f, spapr, MAX_ITERATION_NS); 1091 } else { 1092 rc = htab_save_later_pass(f, spapr, MAX_ITERATION_NS); 1093 } 1094 1095 /* End marker */ 1096 qemu_put_be32(f, 0); 1097 qemu_put_be16(f, 0); 1098 qemu_put_be16(f, 0); 1099 1100 return rc; 1101 } 1102 1103 static int htab_save_complete(QEMUFile *f, void *opaque) 1104 { 1105 sPAPREnvironment *spapr = opaque; 1106 1107 /* Iteration header */ 1108 qemu_put_be32(f, 0); 1109 1110 if (!spapr->htab) { 1111 int rc; 1112 1113 assert(kvm_enabled()); 1114 1115 rc = kvmppc_save_htab(f, spapr->htab_fd, MAX_KVM_BUF_SIZE, -1); 1116 if (rc < 0) { 1117 return rc; 1118 } 1119 close(spapr->htab_fd); 1120 spapr->htab_fd = -1; 1121 } else { 1122 htab_save_later_pass(f, spapr, -1); 1123 } 1124 1125 /* End marker */ 1126 qemu_put_be32(f, 0); 1127 qemu_put_be16(f, 0); 1128 qemu_put_be16(f, 0); 1129 1130 return 0; 1131 } 1132 1133 static int htab_load(QEMUFile *f, void *opaque, int version_id) 1134 { 1135 sPAPREnvironment *spapr = opaque; 1136 uint32_t section_hdr; 1137 int fd = -1; 1138 1139 if (version_id < 1 || version_id > 1) { 1140 fprintf(stderr, "htab_load() bad version\n"); 1141 return -EINVAL; 1142 } 1143 1144 section_hdr = qemu_get_be32(f); 1145 1146 if (section_hdr) { 1147 /* First section, just the hash shift */ 1148 if (spapr->htab_shift != section_hdr) { 1149 return -EINVAL; 1150 } 1151 return 0; 1152 } 1153 1154 if (!spapr->htab) { 1155 assert(kvm_enabled()); 1156 1157 fd = kvmppc_get_htab_fd(true); 1158 if (fd < 0) { 1159 fprintf(stderr, "Unable to open fd to restore KVM hash table: %s\n", 1160 strerror(errno)); 1161 } 1162 } 1163 1164 while (true) { 1165 uint32_t index; 1166 uint16_t n_valid, n_invalid; 1167 1168 index = qemu_get_be32(f); 1169 n_valid = qemu_get_be16(f); 1170 n_invalid = qemu_get_be16(f); 1171 1172 if ((index == 0) && (n_valid == 0) && (n_invalid == 0)) { 1173 /* End of Stream */ 1174 break; 1175 } 1176 1177 if ((index + n_valid + n_invalid) > 1178 (HTAB_SIZE(spapr) / HASH_PTE_SIZE_64)) { 1179 /* Bad index in stream */ 1180 fprintf(stderr, "htab_load() bad index %d (%hd+%hd entries) " 1181 "in htab stream (htab_shift=%d)\n", index, n_valid, n_invalid, 1182 spapr->htab_shift); 1183 return -EINVAL; 1184 } 1185 1186 if (spapr->htab) { 1187 if (n_valid) { 1188 qemu_get_buffer(f, HPTE(spapr->htab, index), 1189 HASH_PTE_SIZE_64 * n_valid); 1190 } 1191 if (n_invalid) { 1192 memset(HPTE(spapr->htab, index + n_valid), 0, 1193 HASH_PTE_SIZE_64 * n_invalid); 1194 } 1195 } else { 1196 int rc; 1197 1198 assert(fd >= 0); 1199 1200 rc = kvmppc_load_htab_chunk(f, fd, index, n_valid, n_invalid); 1201 if (rc < 0) { 1202 return rc; 1203 } 1204 } 1205 } 1206 1207 if (!spapr->htab) { 1208 assert(fd >= 0); 1209 close(fd); 1210 } 1211 1212 return 0; 1213 } 1214 1215 static SaveVMHandlers savevm_htab_handlers = { 1216 .save_live_setup = htab_save_setup, 1217 .save_live_iterate = htab_save_iterate, 1218 .save_live_complete = htab_save_complete, 1219 .load_state = htab_load, 1220 }; 1221 1222 /* pSeries LPAR / sPAPR hardware init */ 1223 static void ppc_spapr_init(MachineState *machine) 1224 { 1225 ram_addr_t ram_size = machine->ram_size; 1226 const char *cpu_model = machine->cpu_model; 1227 const char *kernel_filename = machine->kernel_filename; 1228 const char *kernel_cmdline = machine->kernel_cmdline; 1229 const char *initrd_filename = machine->initrd_filename; 1230 const char *boot_device = machine->boot_order; 1231 PowerPCCPU *cpu; 1232 CPUPPCState *env; 1233 PCIHostState *phb; 1234 int i; 1235 MemoryRegion *sysmem = get_system_memory(); 1236 MemoryRegion *ram = g_new(MemoryRegion, 1); 1237 hwaddr rma_alloc_size; 1238 hwaddr node0_size = (nb_numa_nodes > 1) ? node_mem[0] : ram_size; 1239 uint32_t initrd_base = 0; 1240 long kernel_size = 0, initrd_size = 0; 1241 long load_limit, rtas_limit, fw_size; 1242 bool kernel_le = false; 1243 char *filename; 1244 1245 msi_supported = true; 1246 1247 spapr = g_malloc0(sizeof(*spapr)); 1248 QLIST_INIT(&spapr->phbs); 1249 1250 cpu_ppc_hypercall = emulate_spapr_hypercall; 1251 1252 /* Allocate RMA if necessary */ 1253 rma_alloc_size = kvmppc_alloc_rma("ppc_spapr.rma", sysmem); 1254 1255 if (rma_alloc_size == -1) { 1256 hw_error("qemu: Unable to create RMA\n"); 1257 exit(1); 1258 } 1259 1260 if (rma_alloc_size && (rma_alloc_size < node0_size)) { 1261 spapr->rma_size = rma_alloc_size; 1262 } else { 1263 spapr->rma_size = node0_size; 1264 1265 /* With KVM, we don't actually know whether KVM supports an 1266 * unbounded RMA (PR KVM) or is limited by the hash table size 1267 * (HV KVM using VRMA), so we always assume the latter 1268 * 1269 * In that case, we also limit the initial allocations for RTAS 1270 * etc... to 256M since we have no way to know what the VRMA size 1271 * is going to be as it depends on the size of the hash table 1272 * isn't determined yet. 1273 */ 1274 if (kvm_enabled()) { 1275 spapr->vrma_adjust = 1; 1276 spapr->rma_size = MIN(spapr->rma_size, 0x10000000); 1277 } 1278 } 1279 1280 if (spapr->rma_size > node0_size) { 1281 fprintf(stderr, "Error: Numa node 0 has to span the RMA (%#08"HWADDR_PRIx")\n", 1282 spapr->rma_size); 1283 exit(1); 1284 } 1285 1286 /* We place the device tree and RTAS just below either the top of the RMA, 1287 * or just below 2GB, whichever is lowere, so that it can be 1288 * processed with 32-bit real mode code if necessary */ 1289 rtas_limit = MIN(spapr->rma_size, 0x80000000); 1290 spapr->rtas_addr = rtas_limit - RTAS_MAX_SIZE; 1291 spapr->fdt_addr = spapr->rtas_addr - FDT_MAX_SIZE; 1292 load_limit = spapr->fdt_addr - FW_OVERHEAD; 1293 1294 /* We aim for a hash table of size 1/128 the size of RAM. The 1295 * normal rule of thumb is 1/64 the size of RAM, but that's much 1296 * more than needed for the Linux guests we support. */ 1297 spapr->htab_shift = 18; /* Minimum architected size */ 1298 while (spapr->htab_shift <= 46) { 1299 if ((1ULL << (spapr->htab_shift + 7)) >= ram_size) { 1300 break; 1301 } 1302 spapr->htab_shift++; 1303 } 1304 1305 /* Set up Interrupt Controller before we create the VCPUs */ 1306 spapr->icp = xics_system_init(smp_cpus * kvmppc_smt_threads() / smp_threads, 1307 XICS_IRQS); 1308 spapr->next_irq = XICS_IRQ_BASE; 1309 1310 /* init CPUs */ 1311 if (cpu_model == NULL) { 1312 cpu_model = kvm_enabled() ? "host" : "POWER7"; 1313 } 1314 for (i = 0; i < smp_cpus; i++) { 1315 cpu = cpu_ppc_init(cpu_model); 1316 if (cpu == NULL) { 1317 fprintf(stderr, "Unable to find PowerPC CPU definition\n"); 1318 exit(1); 1319 } 1320 env = &cpu->env; 1321 1322 /* Set time-base frequency to 512 MHz */ 1323 cpu_ppc_tb_init(env, TIMEBASE_FREQ); 1324 1325 /* PAPR always has exception vectors in RAM not ROM. To ensure this, 1326 * MSR[IP] should never be set. 1327 */ 1328 env->msr_mask &= ~(1 << 6); 1329 1330 /* Tell KVM that we're in PAPR mode */ 1331 if (kvm_enabled()) { 1332 kvmppc_set_papr(cpu); 1333 } 1334 1335 if (cpu->max_compat) { 1336 if (ppc_set_compat(cpu, cpu->max_compat) < 0) { 1337 exit(1); 1338 } 1339 } 1340 1341 xics_cpu_setup(spapr->icp, cpu); 1342 1343 qemu_register_reset(spapr_cpu_reset, cpu); 1344 } 1345 1346 /* allocate RAM */ 1347 spapr->ram_limit = ram_size; 1348 if (spapr->ram_limit > rma_alloc_size) { 1349 ram_addr_t nonrma_base = rma_alloc_size; 1350 ram_addr_t nonrma_size = spapr->ram_limit - rma_alloc_size; 1351 1352 memory_region_init_ram(ram, NULL, "ppc_spapr.ram", nonrma_size); 1353 vmstate_register_ram_global(ram); 1354 memory_region_add_subregion(sysmem, nonrma_base, ram); 1355 } 1356 1357 filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, "spapr-rtas.bin"); 1358 spapr->rtas_size = load_image_targphys(filename, spapr->rtas_addr, 1359 rtas_limit - spapr->rtas_addr); 1360 if (spapr->rtas_size < 0) { 1361 hw_error("qemu: could not load LPAR rtas '%s'\n", filename); 1362 exit(1); 1363 } 1364 if (spapr->rtas_size > RTAS_MAX_SIZE) { 1365 hw_error("RTAS too big ! 0x%lx bytes (max is 0x%x)\n", 1366 spapr->rtas_size, RTAS_MAX_SIZE); 1367 exit(1); 1368 } 1369 g_free(filename); 1370 1371 /* Set up EPOW events infrastructure */ 1372 spapr_events_init(spapr); 1373 1374 /* Set up VIO bus */ 1375 spapr->vio_bus = spapr_vio_bus_init(); 1376 1377 for (i = 0; i < MAX_SERIAL_PORTS; i++) { 1378 if (serial_hds[i]) { 1379 spapr_vty_create(spapr->vio_bus, serial_hds[i]); 1380 } 1381 } 1382 1383 /* We always have at least the nvram device on VIO */ 1384 spapr_create_nvram(spapr); 1385 1386 /* Set up PCI */ 1387 spapr_pci_msi_init(spapr, SPAPR_PCI_MSI_WINDOW); 1388 spapr_pci_rtas_init(); 1389 1390 phb = spapr_create_phb(spapr, 0); 1391 1392 for (i = 0; i < nb_nics; i++) { 1393 NICInfo *nd = &nd_table[i]; 1394 1395 if (!nd->model) { 1396 nd->model = g_strdup("ibmveth"); 1397 } 1398 1399 if (strcmp(nd->model, "ibmveth") == 0) { 1400 spapr_vlan_create(spapr->vio_bus, nd); 1401 } else { 1402 pci_nic_init_nofail(&nd_table[i], phb->bus, nd->model, NULL); 1403 } 1404 } 1405 1406 for (i = 0; i <= drive_get_max_bus(IF_SCSI); i++) { 1407 spapr_vscsi_create(spapr->vio_bus); 1408 } 1409 1410 /* Graphics */ 1411 if (spapr_vga_init(phb->bus)) { 1412 spapr->has_graphics = true; 1413 } 1414 1415 if (usb_enabled(spapr->has_graphics)) { 1416 pci_create_simple(phb->bus, -1, "pci-ohci"); 1417 if (spapr->has_graphics) { 1418 usbdevice_create("keyboard"); 1419 usbdevice_create("mouse"); 1420 } 1421 } 1422 1423 if (spapr->rma_size < (MIN_RMA_SLOF << 20)) { 1424 fprintf(stderr, "qemu: pSeries SLOF firmware requires >= " 1425 "%ldM guest RMA (Real Mode Area memory)\n", MIN_RMA_SLOF); 1426 exit(1); 1427 } 1428 1429 if (kernel_filename) { 1430 uint64_t lowaddr = 0; 1431 1432 kernel_size = load_elf(kernel_filename, translate_kernel_address, NULL, 1433 NULL, &lowaddr, NULL, 1, ELF_MACHINE, 0); 1434 if (kernel_size == ELF_LOAD_WRONG_ENDIAN) { 1435 kernel_size = load_elf(kernel_filename, 1436 translate_kernel_address, NULL, 1437 NULL, &lowaddr, NULL, 0, ELF_MACHINE, 0); 1438 kernel_le = kernel_size > 0; 1439 } 1440 if (kernel_size < 0) { 1441 fprintf(stderr, "qemu: error loading %s: %s\n", 1442 kernel_filename, load_elf_strerror(kernel_size)); 1443 exit(1); 1444 } 1445 1446 /* load initrd */ 1447 if (initrd_filename) { 1448 /* Try to locate the initrd in the gap between the kernel 1449 * and the firmware. Add a bit of space just in case 1450 */ 1451 initrd_base = (KERNEL_LOAD_ADDR + kernel_size + 0x1ffff) & ~0xffff; 1452 initrd_size = load_image_targphys(initrd_filename, initrd_base, 1453 load_limit - initrd_base); 1454 if (initrd_size < 0) { 1455 fprintf(stderr, "qemu: could not load initial ram disk '%s'\n", 1456 initrd_filename); 1457 exit(1); 1458 } 1459 } else { 1460 initrd_base = 0; 1461 initrd_size = 0; 1462 } 1463 } 1464 1465 if (bios_name == NULL) { 1466 bios_name = FW_FILE_NAME; 1467 } 1468 filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); 1469 fw_size = load_image_targphys(filename, 0, FW_MAX_SIZE); 1470 if (fw_size < 0) { 1471 hw_error("qemu: could not load LPAR rtas '%s'\n", filename); 1472 exit(1); 1473 } 1474 g_free(filename); 1475 1476 spapr->entry_point = 0x100; 1477 1478 vmstate_register(NULL, 0, &vmstate_spapr, spapr); 1479 register_savevm_live(NULL, "spapr/htab", -1, 1, 1480 &savevm_htab_handlers, spapr); 1481 1482 /* Prepare the device tree */ 1483 spapr->fdt_skel = spapr_create_fdt_skel(initrd_base, initrd_size, 1484 kernel_size, kernel_le, 1485 boot_device, kernel_cmdline, 1486 spapr->epow_irq); 1487 assert(spapr->fdt_skel != NULL); 1488 } 1489 1490 static int spapr_kvm_type(const char *vm_type) 1491 { 1492 if (!vm_type) { 1493 return 0; 1494 } 1495 1496 if (!strcmp(vm_type, "HV")) { 1497 return 1; 1498 } 1499 1500 if (!strcmp(vm_type, "PR")) { 1501 return 2; 1502 } 1503 1504 error_report("Unknown kvm-type specified '%s'", vm_type); 1505 exit(1); 1506 } 1507 1508 /* 1509 * Implementation of an interface to adjust firmware patch 1510 * for the bootindex property handling. 1511 */ 1512 static char *spapr_get_fw_dev_path(FWPathProvider *p, BusState *bus, 1513 DeviceState *dev) 1514 { 1515 #define CAST(type, obj, name) \ 1516 ((type *)object_dynamic_cast(OBJECT(obj), (name))) 1517 SCSIDevice *d = CAST(SCSIDevice, dev, TYPE_SCSI_DEVICE); 1518 sPAPRPHBState *phb = CAST(sPAPRPHBState, dev, TYPE_SPAPR_PCI_HOST_BRIDGE); 1519 1520 if (d) { 1521 void *spapr = CAST(void, bus->parent, "spapr-vscsi"); 1522 VirtIOSCSI *virtio = CAST(VirtIOSCSI, bus->parent, TYPE_VIRTIO_SCSI); 1523 USBDevice *usb = CAST(USBDevice, bus->parent, TYPE_USB_DEVICE); 1524 1525 if (spapr) { 1526 /* 1527 * Replace "channel@0/disk@0,0" with "disk@8000000000000000": 1528 * We use SRP luns of the form 8000 | (bus << 8) | (id << 5) | lun 1529 * in the top 16 bits of the 64-bit LUN 1530 */ 1531 unsigned id = 0x8000 | (d->id << 8) | d->lun; 1532 return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev), 1533 (uint64_t)id << 48); 1534 } else if (virtio) { 1535 /* 1536 * We use SRP luns of the form 01000000 | (target << 8) | lun 1537 * in the top 32 bits of the 64-bit LUN 1538 * Note: the quote above is from SLOF and it is wrong, 1539 * the actual binding is: 1540 * swap 0100 or 10 << or 20 << ( target lun-id -- srplun ) 1541 */ 1542 unsigned id = 0x1000000 | (d->id << 16) | d->lun; 1543 return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev), 1544 (uint64_t)id << 32); 1545 } else if (usb) { 1546 /* 1547 * We use SRP luns of the form 01000000 | (usb-port << 16) | lun 1548 * in the top 32 bits of the 64-bit LUN 1549 */ 1550 unsigned usb_port = atoi(usb->port->path); 1551 unsigned id = 0x1000000 | (usb_port << 16) | d->lun; 1552 return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev), 1553 (uint64_t)id << 32); 1554 } 1555 } 1556 1557 if (phb) { 1558 /* Replace "pci" with "pci@800000020000000" */ 1559 return g_strdup_printf("pci@%"PRIX64, phb->buid); 1560 } 1561 1562 return NULL; 1563 } 1564 1565 static void spapr_machine_class_init(ObjectClass *oc, void *data) 1566 { 1567 MachineClass *mc = MACHINE_CLASS(oc); 1568 FWPathProviderClass *fwc = FW_PATH_PROVIDER_CLASS(oc); 1569 1570 mc->name = "pseries"; 1571 mc->desc = "pSeries Logical Partition (PAPR compliant)"; 1572 mc->is_default = 1; 1573 mc->init = ppc_spapr_init; 1574 mc->reset = ppc_spapr_reset; 1575 mc->block_default_type = IF_SCSI; 1576 mc->max_cpus = MAX_CPUS; 1577 mc->no_parallel = 1; 1578 mc->default_boot_order = NULL; 1579 mc->kvm_type = spapr_kvm_type; 1580 1581 fwc->get_dev_path = spapr_get_fw_dev_path; 1582 } 1583 1584 static const TypeInfo spapr_machine_info = { 1585 .name = TYPE_SPAPR_MACHINE, 1586 .parent = TYPE_MACHINE, 1587 .class_init = spapr_machine_class_init, 1588 .interfaces = (InterfaceInfo[]) { 1589 { TYPE_FW_PATH_PROVIDER }, 1590 { } 1591 }, 1592 }; 1593 1594 static void spapr_machine_register_types(void) 1595 { 1596 type_register_static(&spapr_machine_info); 1597 } 1598 1599 type_init(spapr_machine_register_types) 1600