/*
 * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
 *
 * Copyright (c) 2004-2007 Fabrice Bellard
 * Copyright (c) 2007 Jocelyn Mayer
 * Copyright (c) 2010 David Gibson, IBM Corporation.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 */
#include "sysemu/sysemu.h"
#include "hw/hw.h"
#include "hw/fw-path-provider.h"
#include "elf.h"
#include "net/net.h"
#include "sysemu/blockdev.h"
#include "sysemu/cpus.h"
#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "mmu-hash64.h"

#include "hw/boards.h"
#include "hw/ppc/ppc.h"
#include "hw/loader.h"

#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"
#include "hw/pci-host/spapr.h"
#include "hw/ppc/xics.h"
#include "hw/pci/msi.h"

#include "hw/pci/pci.h"
#include "hw/scsi/scsi.h"
#include "hw/virtio/virtio-scsi.h"

#include "exec/address-spaces.h"
#include "hw/usb.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"

#include <libfdt.h>

/* SLOF memory layout:
 *
 * SLOF raw image loaded at 0, copies its romfs right below the flat
 * device-tree, then positions SLOF itself 31 MB below that
 *
 * So we set FW_OVERHEAD to 40MB which should account for all of that
 * and more
 *
 * We load our kernel at 4M, leaving space for SLOF initial image
 */
#define FDT_MAX_SIZE            0x40000
#define RTAS_MAX_SIZE           0x10000
#define FW_MAX_SIZE             0x400000
#define FW_FILE_NAME            "slof.bin"
#define FW_OVERHEAD             0x2800000
#define KERNEL_LOAD_ADDR        FW_MAX_SIZE

#define MIN_RMA_SLOF            128UL

#define TIMEBASE_FREQ           512000000ULL

#define MAX_CPUS                256
#define XICS_IRQS               1024

#define PHANDLE_XICP            0x00001111

#define HTAB_SIZE(spapr)        (1ULL << ((spapr)->htab_shift))

#define TYPE_SPAPR_MACHINE      "spapr-machine"

sPAPREnvironment *spapr;

int spapr_allocate_irq(int hint, bool lsi)
{
    int irq;

    if (hint) {
        irq = hint;
        if (hint >= spapr->next_irq) {
            spapr->next_irq = hint + 1;
        }
        /* FIXME: we should probably check for collisions somehow */
    } else {
        irq = spapr->next_irq++;
    }

    /* Configure irq type */
    if (!xics_get_qirq(spapr->icp, irq)) {
        return 0;
    }

    xics_set_irq_type(spapr->icp, irq, lsi);

    return irq;
}

/*
 * Allocate a block of consecutive IRQs; returns the number of the first one.
 * If msi==true, aligns the first IRQ number to num.
 */
int spapr_allocate_irq_block(int num, bool lsi, bool msi)
{
    int first = -1;
    int i, hint = 0;

    /*
     * MSIMessage::data is used for storing VIRQ so
     * it has to be aligned to num to support multiple
     * MSI vectors. MSI-X is not affected by this.
     * The hint is used for the first IRQ, the rest should
     * be allocated contiguously.
     */
    if (msi) {
        assert((num == 1) || (num == 2) || (num == 4) ||
               (num == 8) || (num == 16) || (num == 32));
        hint = (spapr->next_irq + num - 1) & ~(num - 1);
    }

    for (i = 0; i < num; ++i) {
        int irq;

        irq = spapr_allocate_irq(hint, lsi);
        if (!irq) {
            return -1;
        }

        if (0 == i) {
            first = irq;
            hint = 0;
        }

        /* If the above doesn't create a consecutive block then that's
         * an internal bug */
        assert(irq == (first + i));
    }

    return first;
}
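/*
 * For example (hypothetical values): if spapr->next_irq is 19 and a device
 * asks for a block of num = 8 MSI vectors, the alignment hint in
 * spapr_allocate_irq_block() above rounds up to (19 + 8 - 1) & ~7 = 24, so
 * the block spans IRQs 24..31 and adding a vector number 0..7 to the base
 * IRQ stays inside the block, as required for MSIMessage::data.
 */
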
static XICSState *try_create_xics(const char *type, int nr_servers,
                                  int nr_irqs)
{
    DeviceState *dev;

    dev = qdev_create(NULL, type);
    qdev_prop_set_uint32(dev, "nr_servers", nr_servers);
    qdev_prop_set_uint32(dev, "nr_irqs", nr_irqs);
    if (qdev_init(dev) < 0) {
        return NULL;
    }

    return XICS_COMMON(dev);
}

static XICSState *xics_system_init(int nr_servers, int nr_irqs)
{
    XICSState *icp = NULL;

    if (kvm_enabled()) {
        QemuOpts *machine_opts = qemu_get_machine_opts();
        bool irqchip_allowed = qemu_opt_get_bool(machine_opts,
                                                 "kernel_irqchip", true);
        bool irqchip_required = qemu_opt_get_bool(machine_opts,
                                                  "kernel_irqchip", false);
        if (irqchip_allowed) {
            icp = try_create_xics(TYPE_KVM_XICS, nr_servers, nr_irqs);
        }

        if (irqchip_required && !icp) {
            perror("Failed to create in-kernel XICS\n");
            abort();
        }
    }

    if (!icp) {
        icp = try_create_xics(TYPE_XICS, nr_servers, nr_irqs);
    }

    if (!icp) {
        perror("Failed to create XICS\n");
        abort();
    }

    return icp;
}

static int spapr_fixup_cpu_dt(void *fdt, sPAPREnvironment *spapr)
{
    int ret = 0, offset;
    CPUState *cpu;
    char cpu_model[32];
    int smt = kvmppc_smt_threads();
    uint32_t pft_size_prop[] = {0, cpu_to_be32(spapr->htab_shift)};

    CPU_FOREACH(cpu) {
        DeviceClass *dc = DEVICE_GET_CLASS(cpu);
        int index = ppc_get_vcpu_dt_id(POWERPC_CPU(cpu));
        uint32_t associativity[] = {cpu_to_be32(0x5),
                                    cpu_to_be32(0x0),
                                    cpu_to_be32(0x0),
                                    cpu_to_be32(0x0),
                                    cpu_to_be32(cpu->numa_node),
                                    cpu_to_be32(index)};

        if ((index % smt) != 0) {
            continue;
        }

        snprintf(cpu_model, 32, "/cpus/%s@%x", dc->fw_name,
                 index);

        offset = fdt_path_offset(fdt, cpu_model);
        if (offset < 0) {
            return offset;
        }

        if (nb_numa_nodes > 1) {
            ret = fdt_setprop(fdt, offset, "ibm,associativity", associativity,
                              sizeof(associativity));
            if (ret < 0) {
                return ret;
            }
        }

        ret = fdt_setprop(fdt, offset, "ibm,pft-size",
                          pft_size_prop, sizeof(pft_size_prop));
        if (ret < 0) {
            return ret;
        }
    }
    return ret;
}

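/*
 * Each supported base segment page size is encoded below as a run of cells:
 * { base page shift, SLB encoding, N, then N pairs of
 *   { actual page shift, HPTE encoding } }.
 * As an illustrative example (exact values depend on the CPU model), a 4K
 * base page size with a single 4K actual page size would be emitted as
 * { 12, 0x0, 1, 12, 0x0 }.
 */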
static size_t create_page_sizes_prop(CPUPPCState *env, uint32_t *prop,
                                     size_t maxsize)
{
    size_t maxcells = maxsize / sizeof(uint32_t);
    int i, j, count;
    uint32_t *p = prop;

    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        struct ppc_one_seg_page_size *sps = &env->sps.sps[i];

        if (!sps->page_shift) {
            break;
        }
        for (count = 0; count < PPC_PAGE_SIZES_MAX_SZ; count++) {
            if (sps->enc[count].page_shift == 0) {
                break;
            }
        }
        if ((p - prop) >= (maxcells - 3 - count * 2)) {
            break;
        }
        *(p++) = cpu_to_be32(sps->page_shift);
        *(p++) = cpu_to_be32(sps->slb_enc);
        *(p++) = cpu_to_be32(count);
        for (j = 0; j < count; j++) {
            *(p++) = cpu_to_be32(sps->enc[j].page_shift);
            *(p++) = cpu_to_be32(sps->enc[j].pte_enc);
        }
    }

    return (p - prop) * sizeof(uint32_t);
}

#define _FDT(exp) \
    do { \
        int ret = (exp); \
        if (ret < 0) { \
            fprintf(stderr, "qemu: error creating device tree: %s: %s\n", \
                    #exp, fdt_strerror(ret)); \
            exit(1); \
        } \
    } while (0)


static void *spapr_create_fdt_skel(hwaddr initrd_base,
                                   hwaddr initrd_size,
                                   hwaddr kernel_size,
                                   bool little_endian,
                                   const char *boot_device,
                                   const char *kernel_cmdline,
                                   uint32_t epow_irq)
{
    void *fdt;
    CPUState *cs;
    uint32_t start_prop = cpu_to_be32(initrd_base);
    uint32_t end_prop = cpu_to_be32(initrd_base + initrd_size);
    char hypertas_prop[] = "hcall-pft\0hcall-term\0hcall-dabr\0hcall-interrupt"
        "\0hcall-tce\0hcall-vio\0hcall-splpar\0hcall-bulk\0hcall-set-mode";
    char qemu_hypertas_prop[] = "hcall-memop1";
    uint32_t refpoints[] = {cpu_to_be32(0x4), cpu_to_be32(0x4)};
    uint32_t interrupt_server_ranges_prop[] = {0, cpu_to_be32(smp_cpus)};
    int i, smt = kvmppc_smt_threads();
    unsigned char vec5[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x80};

    fdt = g_malloc0(FDT_MAX_SIZE);
    _FDT((fdt_create(fdt, FDT_MAX_SIZE)));

    if (kernel_size) {
        _FDT((fdt_add_reservemap_entry(fdt, KERNEL_LOAD_ADDR, kernel_size)));
    }
    if (initrd_size) {
        _FDT((fdt_add_reservemap_entry(fdt, initrd_base, initrd_size)));
    }
    _FDT((fdt_finish_reservemap(fdt)));

    /* Root node */
    _FDT((fdt_begin_node(fdt, "")));
    _FDT((fdt_property_string(fdt, "device_type", "chrp")));
    _FDT((fdt_property_string(fdt, "model", "IBM pSeries (emulated by qemu)")));
    _FDT((fdt_property_string(fdt, "compatible", "qemu,pseries")));

    _FDT((fdt_property_cell(fdt, "#address-cells", 0x2)));
    _FDT((fdt_property_cell(fdt, "#size-cells", 0x2)));

    /* /chosen */
    _FDT((fdt_begin_node(fdt, "chosen")));

    /* Set Form1_affinity */
    _FDT((fdt_property(fdt, "ibm,architecture-vec-5", vec5, sizeof(vec5))));

    _FDT((fdt_property_string(fdt, "bootargs", kernel_cmdline)));
    _FDT((fdt_property(fdt, "linux,initrd-start",
                       &start_prop, sizeof(start_prop))));
    _FDT((fdt_property(fdt, "linux,initrd-end",
                       &end_prop, sizeof(end_prop))));
    if (kernel_size) {
        uint64_t kprop[2] = { cpu_to_be64(KERNEL_LOAD_ADDR),
                              cpu_to_be64(kernel_size) };

        _FDT((fdt_property(fdt, "qemu,boot-kernel", &kprop, sizeof(kprop))));
        if (little_endian) {
            _FDT((fdt_property(fdt, "qemu,boot-kernel-le", NULL, 0)));
        }
    }
    if (boot_device) {
        _FDT((fdt_property_string(fdt, "qemu,boot-device", boot_device)));
    }
    _FDT((fdt_property_cell(fdt, "qemu,graphic-width", graphic_width)));
    _FDT((fdt_property_cell(fdt, "qemu,graphic-height", graphic_height)));
    _FDT((fdt_property_cell(fdt, "qemu,graphic-depth", graphic_depth)));

    _FDT((fdt_end_node(fdt)));

    /* cpus */
    _FDT((fdt_begin_node(fdt, "cpus")));

    _FDT((fdt_property_cell(fdt, "#address-cells", 0x1)));
    _FDT((fdt_property_cell(fdt, "#size-cells", 0x0)));

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        CPUPPCState *env = &cpu->env;
        DeviceClass *dc = DEVICE_GET_CLASS(cs);
        PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs);
        int index = ppc_get_vcpu_dt_id(cpu);
        uint32_t servers_prop[smp_threads];
        uint32_t gservers_prop[smp_threads * 2];
        char *nodename;
        uint32_t segs[] = {cpu_to_be32(28), cpu_to_be32(40),
                           0xffffffff, 0xffffffff};
        uint32_t tbfreq = kvm_enabled() ? kvmppc_get_tbfreq() : TIMEBASE_FREQ;
        uint32_t cpufreq = kvm_enabled() ? kvmppc_get_clockfreq() : 1000000000;
        uint32_t page_sizes_prop[64];
        size_t page_sizes_prop_size;

        if ((index % smt) != 0) {
            continue;
        }

        nodename = g_strdup_printf("%s@%x", dc->fw_name, index);

        _FDT((fdt_begin_node(fdt, nodename)));

        g_free(nodename);

        _FDT((fdt_property_cell(fdt, "reg", index)));
        _FDT((fdt_property_string(fdt, "device_type", "cpu")));

        _FDT((fdt_property_cell(fdt, "cpu-version", env->spr[SPR_PVR])));
        _FDT((fdt_property_cell(fdt, "d-cache-block-size",
                                env->dcache_line_size)));
        _FDT((fdt_property_cell(fdt, "d-cache-line-size",
                                env->dcache_line_size)));
        _FDT((fdt_property_cell(fdt, "i-cache-block-size",
                                env->icache_line_size)));
        _FDT((fdt_property_cell(fdt, "i-cache-line-size",
                                env->icache_line_size)));

        if (pcc->l1_dcache_size) {
            _FDT((fdt_property_cell(fdt, "d-cache-size", pcc->l1_dcache_size)));
        } else {
            fprintf(stderr, "Warning: Unknown L1 dcache size for cpu\n");
        }
        if (pcc->l1_icache_size) {
            _FDT((fdt_property_cell(fdt, "i-cache-size", pcc->l1_icache_size)));
        } else {
            fprintf(stderr, "Warning: Unknown L1 icache size for cpu\n");
        }

        _FDT((fdt_property_cell(fdt, "timebase-frequency", tbfreq)));
        _FDT((fdt_property_cell(fdt, "clock-frequency", cpufreq)));
        _FDT((fdt_property_cell(fdt, "ibm,slb-size", env->slb_nr)));
        _FDT((fdt_property_string(fdt, "status", "okay")));
        _FDT((fdt_property(fdt, "64-bit", NULL, 0)));

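        /*
         * For example (hypothetical values): with smp_threads = 4 and
         * index = 8, the loop below produces servers_prop = { 8, 9, 10, 11 }
         * and gservers_prop = { 8, 0, 9, 0, 10, 0, 11, 0 }, i.e. one
         * (server, gserver) pair per hardware thread of this core.
         */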
        /* Build interrupt servers and gservers properties */
        for (i = 0; i < smp_threads; i++) {
            servers_prop[i] = cpu_to_be32(index + i);
            /* Hack, direct the group queues back to cpu 0 */
            gservers_prop[i*2] = cpu_to_be32(index + i);
            gservers_prop[i*2 + 1] = 0;
        }
        _FDT((fdt_property(fdt, "ibm,ppc-interrupt-server#s",
                           servers_prop, sizeof(servers_prop))));
        _FDT((fdt_property(fdt, "ibm,ppc-interrupt-gserver#s",
                           gservers_prop, sizeof(gservers_prop))));

        if (env->spr_cb[SPR_PURR].oea_read) {
            _FDT((fdt_property(fdt, "ibm,purr", NULL, 0)));
        }

        if (env->mmu_model & POWERPC_MMU_1TSEG) {
            _FDT((fdt_property(fdt, "ibm,processor-segment-sizes",
                               segs, sizeof(segs))));
        }

        /* Advertise VMX/VSX (vector extensions) if available
         *   0 / no property == no vector extensions
         *   1               == VMX / Altivec available
         *   2               == VSX available */
        if (env->insns_flags & PPC_ALTIVEC) {
            uint32_t vmx = (env->insns_flags2 & PPC2_VSX) ? 2 : 1;

            _FDT((fdt_property_cell(fdt, "ibm,vmx", vmx)));
        }

        /* Advertise DFP (Decimal Floating Point) if available
         *   0 / no property == no DFP
         *   1               == DFP available */
        if (env->insns_flags2 & PPC2_DFP) {
            _FDT((fdt_property_cell(fdt, "ibm,dfp", 1)));
        }

        page_sizes_prop_size = create_page_sizes_prop(env, page_sizes_prop,
                                                      sizeof(page_sizes_prop));
        if (page_sizes_prop_size) {
            _FDT((fdt_property(fdt, "ibm,segment-page-sizes",
                               page_sizes_prop, page_sizes_prop_size)));
        }

        _FDT((fdt_end_node(fdt)));
    }

    _FDT((fdt_end_node(fdt)));

    /* RTAS */
    _FDT((fdt_begin_node(fdt, "rtas")));

    _FDT((fdt_property(fdt, "ibm,hypertas-functions", hypertas_prop,
                       sizeof(hypertas_prop))));
    _FDT((fdt_property(fdt, "qemu,hypertas-functions", qemu_hypertas_prop,
                       sizeof(qemu_hypertas_prop))));

    _FDT((fdt_property(fdt, "ibm,associativity-reference-points",
                       refpoints, sizeof(refpoints))));

    _FDT((fdt_property_cell(fdt, "rtas-error-log-max", RTAS_ERROR_LOG_MAX)));

    _FDT((fdt_end_node(fdt)));

    /* interrupt controller */
    _FDT((fdt_begin_node(fdt, "interrupt-controller")));

    _FDT((fdt_property_string(fdt, "device_type",
                              "PowerPC-External-Interrupt-Presentation")));
    _FDT((fdt_property_string(fdt, "compatible", "IBM,ppc-xicp")));
    _FDT((fdt_property(fdt, "interrupt-controller", NULL, 0)));
    _FDT((fdt_property(fdt, "ibm,interrupt-server-ranges",
                       interrupt_server_ranges_prop,
                       sizeof(interrupt_server_ranges_prop))));
    _FDT((fdt_property_cell(fdt, "#interrupt-cells", 2)));
    _FDT((fdt_property_cell(fdt, "linux,phandle", PHANDLE_XICP)));
    _FDT((fdt_property_cell(fdt, "phandle", PHANDLE_XICP)));

    _FDT((fdt_end_node(fdt)));

    /* vdevice */
    _FDT((fdt_begin_node(fdt, "vdevice")));

    _FDT((fdt_property_string(fdt, "device_type", "vdevice")));
    _FDT((fdt_property_string(fdt, "compatible", "IBM,vdevice")));
    _FDT((fdt_property_cell(fdt, "#address-cells", 0x1)));
    _FDT((fdt_property_cell(fdt, "#size-cells", 0x0)));
    _FDT((fdt_property_cell(fdt, "#interrupt-cells", 0x2)));
    _FDT((fdt_property(fdt, "interrupt-controller", NULL, 0)));

    _FDT((fdt_end_node(fdt)));

    /* event-sources */
    spapr_events_fdt_skel(fdt, epow_irq);

    _FDT((fdt_end_node(fdt))); /* close root node */
    _FDT((fdt_finish(fdt)));

    return fdt;
}

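/*
 * For example (hypothetical layout): with a 512 MB RMA and two NUMA nodes of
 * 1 GB each, the function below emits a memory@0 node covering the RMA, a
 * second node-0 region starting at the end of the RMA, and memory@40000000
 * for node 1, each carrying an "ibm,associativity" property naming its node.
 */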
static int spapr_populate_memory(sPAPREnvironment *spapr, void *fdt)
{
    uint32_t associativity[] = {cpu_to_be32(0x4), cpu_to_be32(0x0),
                                cpu_to_be32(0x0), cpu_to_be32(0x0),
                                cpu_to_be32(0x0)};
    char mem_name[32];
    hwaddr node0_size, mem_start, node_size;
    uint64_t mem_reg_property[2];
    int i, off;

    /* memory node(s) */
    if (nb_numa_nodes > 1 && node_mem[0] < ram_size) {
        node0_size = node_mem[0];
    } else {
        node0_size = ram_size;
    }

    /* RMA */
    mem_reg_property[0] = 0;
    mem_reg_property[1] = cpu_to_be64(spapr->rma_size);
    off = fdt_add_subnode(fdt, 0, "memory@0");
    _FDT(off);
    _FDT((fdt_setprop_string(fdt, off, "device_type", "memory")));
    _FDT((fdt_setprop(fdt, off, "reg", mem_reg_property,
                      sizeof(mem_reg_property))));
    _FDT((fdt_setprop(fdt, off, "ibm,associativity", associativity,
                      sizeof(associativity))));

    /* RAM: Node 0 */
    if (node0_size > spapr->rma_size) {
        mem_reg_property[0] = cpu_to_be64(spapr->rma_size);
        mem_reg_property[1] = cpu_to_be64(node0_size - spapr->rma_size);

        sprintf(mem_name, "memory@" TARGET_FMT_lx, spapr->rma_size);
        off = fdt_add_subnode(fdt, 0, mem_name);
        _FDT(off);
        _FDT((fdt_setprop_string(fdt, off, "device_type", "memory")));
        _FDT((fdt_setprop(fdt, off, "reg", mem_reg_property,
                          sizeof(mem_reg_property))));
        _FDT((fdt_setprop(fdt, off, "ibm,associativity", associativity,
                          sizeof(associativity))));
    }

    /* RAM: Node 1 and beyond */
    mem_start = node0_size;
    for (i = 1; i < nb_numa_nodes; i++) {
        mem_reg_property[0] = cpu_to_be64(mem_start);
        if (mem_start >= ram_size) {
            node_size = 0;
        } else {
            node_size = node_mem[i];
            if (node_size > ram_size - mem_start) {
                node_size = ram_size - mem_start;
            }
        }
        mem_reg_property[1] = cpu_to_be64(node_size);
        associativity[3] = associativity[4] = cpu_to_be32(i);
        sprintf(mem_name, "memory@" TARGET_FMT_lx, mem_start);
        off = fdt_add_subnode(fdt, 0, mem_name);
        _FDT(off);
        _FDT((fdt_setprop_string(fdt, off, "device_type", "memory")));
        _FDT((fdt_setprop(fdt, off, "reg", mem_reg_property,
                          sizeof(mem_reg_property))));
        _FDT((fdt_setprop(fdt, off, "ibm,associativity", associativity,
                          sizeof(associativity))));
        mem_start += node_size;
    }

    return 0;
}

static void spapr_finalize_fdt(sPAPREnvironment *spapr,
                               hwaddr fdt_addr,
                               hwaddr rtas_addr,
                               hwaddr rtas_size)
{
    int ret, i;
    size_t cb = 0;
    char *bootlist;
    void *fdt;
    sPAPRPHBState *phb;

    fdt = g_malloc(FDT_MAX_SIZE);

    /* open out the base tree into a temp buffer for the final tweaks */
    _FDT((fdt_open_into(spapr->fdt_skel, fdt, FDT_MAX_SIZE)));

    ret = spapr_populate_memory(spapr, fdt);
    if (ret < 0) {
        fprintf(stderr, "couldn't setup memory nodes in fdt\n");
        exit(1);
    }

    ret = spapr_populate_vdevice(spapr->vio_bus, fdt);
    if (ret < 0) {
        fprintf(stderr, "couldn't setup vio devices in fdt\n");
        exit(1);
    }

    QLIST_FOREACH(phb, &spapr->phbs, list) {
        ret = spapr_populate_pci_dt(phb, PHANDLE_XICP, fdt);
    }

    if (ret < 0) {
        fprintf(stderr, "couldn't setup PCI devices in fdt\n");
        exit(1);
    }

    /* RTAS */
    ret = spapr_rtas_device_tree_setup(fdt, rtas_addr, rtas_size);
    if (ret < 0) {
        fprintf(stderr, "Couldn't set up RTAS device tree properties\n");
    }

    /* Advertise NUMA via ibm,associativity */
    ret = spapr_fixup_cpu_dt(fdt, spapr);
    if (ret < 0) {
        fprintf(stderr, "Couldn't finalize CPU device tree properties\n");
    }

    bootlist = get_boot_devices_list(&cb, true);
    if (cb && bootlist) {
        int offset = fdt_path_offset(fdt, "/chosen");
        if (offset < 0) {
            exit(1);
        }
        for (i = 0; i < cb; i++) {
            if (bootlist[i] == '\n') {
                bootlist[i] = ' ';
            }
        }
        ret = fdt_setprop_string(fdt, offset, "qemu,boot-list", bootlist);
    }

    if (!spapr->has_graphics) {
        spapr_populate_chosen_stdout(fdt, spapr->vio_bus);
    }

    _FDT((fdt_pack(fdt)));

    if (fdt_totalsize(fdt) > FDT_MAX_SIZE) {
        hw_error("FDT too big ! 0x%x bytes (max is 0x%x)\n",
                 fdt_totalsize(fdt), FDT_MAX_SIZE);
        exit(1);
    }

    cpu_physical_memory_write(fdt_addr, fdt, fdt_totalsize(fdt));

    g_free(fdt);
}

static uint64_t translate_kernel_address(void *opaque, uint64_t addr)
{
    return (addr & 0x0fffffff) + KERNEL_LOAD_ADDR;
}

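/*
 * PAPR hypercall convention, as assumed by the handler below: the guest
 * passes the hypercall number in r3 and arguments in r4 onwards, and the
 * return code comes back in r3. For example, a guest H_SET_DABR call would
 * arrive with r3 = H_SET_DABR and the new DABR value in r4.
 */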
static void emulate_spapr_hypercall(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;

    if (msr_pr) {
        hcall_dprintf("Hypercall made with MSR[PR]=1\n");
        env->gpr[3] = H_PRIVILEGE;
    } else {
        env->gpr[3] = spapr_hypercall(cpu, env->gpr[3], &env->gpr[4]);
    }
}

static void spapr_reset_htab(sPAPREnvironment *spapr)
{
    long shift;

    /* Allocate the hash page table.  Its size (spapr->htab_shift) was
     * chosen at init time, scaled to the size of guest RAM. */

    shift = kvmppc_reset_htab(spapr->htab_shift);

    if (shift > 0) {
        /* Kernel handles htab, we don't need to allocate one */
        spapr->htab_shift = shift;
        kvmppc_kern_htab = true;
    } else {
        if (!spapr->htab) {
            /* Allocate an htab if we don't yet have one */
            spapr->htab = qemu_memalign(HTAB_SIZE(spapr), HTAB_SIZE(spapr));
        }

        /* And clear it */
        memset(spapr->htab, 0, HTAB_SIZE(spapr));
    }

    /* Update the RMA size if necessary */
    if (spapr->vrma_adjust) {
        hwaddr node0_size = (nb_numa_nodes > 1) ? node_mem[0] : ram_size;
        spapr->rma_size = kvmppc_rma_size(node0_size, spapr->htab_shift);
    }
}

static void ppc_spapr_reset(void)
{
    PowerPCCPU *first_ppc_cpu;

    /* Reset the hash table & recalc the RMA */
    spapr_reset_htab(spapr);

    qemu_devices_reset();

    /* Load the fdt */
    spapr_finalize_fdt(spapr, spapr->fdt_addr, spapr->rtas_addr,
                       spapr->rtas_size);

    /* Set up the entry state */
    first_ppc_cpu = POWERPC_CPU(first_cpu);
    first_ppc_cpu->env.gpr[3] = spapr->fdt_addr;
    first_ppc_cpu->env.gpr[5] = 0;
    first_cpu->halted = 0;
    first_ppc_cpu->env.nip = spapr->entry_point;
}

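/*
 * A worked example for the SDR1/htab_mask setup in spapr_cpu_reset() below
 * (hypothetical size): with htab_shift = 24 the hash table is 16 MB and
 * holds 2^(24 - 7) = 131072 PTE groups (each group is 8 HPTEs * 16 bytes =
 * 128 bytes), so htab_mask = 0x1ffff and the low bits of SDR1 encode the
 * size as htab_shift - 18 = 6.
 */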
static void spapr_cpu_reset(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    cpu_reset(cs);

    /* All CPUs start halted.  CPU0 is unhalted from the machine level
     * reset code and the rest are explicitly started up by the guest
     * using an RTAS call */
    cs->halted = 1;

    env->spr[SPR_HIOR] = 0;

    env->external_htab = (uint8_t *)spapr->htab;
    if (kvm_enabled() && !env->external_htab) {
        /*
         * HV KVM, set external_htab to 1 so our ppc_hash64_load_hpte*
         * functions do the right thing.
         */
        env->external_htab = (void *)1;
    }
    env->htab_base = -1;
    /*
     * htab_mask is the mask used to normalize hash value to PTEG index.
     * htab_shift is log2 of hash table size.
     * We have 8 HPTEs per group, and each HPTE is 16 bytes,
     * i.e. 128 bytes per PTE group.
     */
    env->htab_mask = (1ULL << ((spapr)->htab_shift - 7)) - 1;
    env->spr[SPR_SDR1] = (target_ulong)(uintptr_t)spapr->htab |
        (spapr->htab_shift - 18);
}

static void spapr_create_nvram(sPAPREnvironment *spapr)
{
    DeviceState *dev = qdev_create(&spapr->vio_bus->bus, "spapr-nvram");
    DriveInfo *dinfo = drive_get(IF_PFLASH, 0, 0);

    if (dinfo) {
        qdev_prop_set_drive_nofail(dev, "drive", dinfo->bdrv);
    }

    qdev_init_nofail(dev);

    spapr->nvram = (struct sPAPRNVRAM *)dev;
}

/* Returns whether we want to use VGA or not */
static int spapr_vga_init(PCIBus *pci_bus)
{
    switch (vga_interface_type) {
    case VGA_NONE:
        return false;
    case VGA_DEVICE:
        return true;
    case VGA_STD:
        return pci_vga_init(pci_bus) != NULL;
    default:
        fprintf(stderr, "This vga model is not supported, "
                "currently it only supports -vga std\n");
        exit(0);
    }
}

static const VMStateDescription vmstate_spapr = {
    .name = "spapr",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(next_irq, sPAPREnvironment),

        /* RTC offset */
        VMSTATE_UINT64(rtc_offset, sPAPREnvironment),

        VMSTATE_END_OF_LIST()
    },
};

#define HPTE(_table, _i)   (void *)(((uint64_t *)(_table)) + ((_i) * 2))
#define HPTE_VALID(_hpte)  (tswap64(*((uint64_t *)(_hpte))) & HPTE64_V_VALID)
#define HPTE_DIRTY(_hpte)  (tswap64(*((uint64_t *)(_hpte))) & HPTE64_V_HPTE_DIRTY)
#define CLEAN_HPTE(_hpte)  ((*(uint64_t *)(_hpte)) &= tswap64(~HPTE64_V_HPTE_DIRTY))

static int htab_save_setup(QEMUFile *f, void *opaque)
{
    sPAPREnvironment *spapr = opaque;

    /* "Iteration" header */
    qemu_put_be32(f, spapr->htab_shift);

    if (spapr->htab) {
        spapr->htab_save_index = 0;
        spapr->htab_first_pass = true;
    } else {
        assert(kvm_enabled());

        spapr->htab_fd = kvmppc_get_htab_fd(false);
        if (spapr->htab_fd < 0) {
            fprintf(stderr, "Unable to open fd for reading hash table from KVM: %s\n",
                    strerror(errno));
            return -1;
        }
    }

    return 0;
}

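/*
 * On-the-wire format of the hash table migration stream, as emitted by the
 * two passes below: each chunk is a be32 starting HPTE slot index, a be16
 * count of valid HPTEs, a be16 count of invalid HPTEs, followed by the raw
 * contents of the valid HPTEs (HASH_PTE_SIZE_64 bytes each).  A chunk of
 * { 0, 0, 0 } marks the end of an iteration.
 */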
static void htab_save_first_pass(QEMUFile *f, sPAPREnvironment *spapr,
                                 int64_t max_ns)
{
    int htabslots = HTAB_SIZE(spapr) / HASH_PTE_SIZE_64;
    int index = spapr->htab_save_index;
    int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    assert(spapr->htab_first_pass);

    do {
        int chunkstart;

        /* Consume invalid HPTEs */
        while ((index < htabslots)
               && !HPTE_VALID(HPTE(spapr->htab, index))) {
            index++;
            CLEAN_HPTE(HPTE(spapr->htab, index));
        }

        /* Consume valid HPTEs */
        chunkstart = index;
        while ((index < htabslots)
               && HPTE_VALID(HPTE(spapr->htab, index))) {
            index++;
            CLEAN_HPTE(HPTE(spapr->htab, index));
        }

        if (index > chunkstart) {
            int n_valid = index - chunkstart;

            qemu_put_be32(f, chunkstart);
            qemu_put_be16(f, n_valid);
            qemu_put_be16(f, 0);
            qemu_put_buffer(f, HPTE(spapr->htab, chunkstart),
                            HASH_PTE_SIZE_64 * n_valid);

            if ((qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) > max_ns) {
                break;
            }
        }
    } while ((index < htabslots) && !qemu_file_rate_limit(f));

    if (index >= htabslots) {
        assert(index == htabslots);
        index = 0;
        spapr->htab_first_pass = false;
    }
    spapr->htab_save_index = index;
}

static int htab_save_later_pass(QEMUFile *f, sPAPREnvironment *spapr,
                                int64_t max_ns)
{
    bool final = max_ns < 0;
    int htabslots = HTAB_SIZE(spapr) / HASH_PTE_SIZE_64;
    int examined = 0, sent = 0;
    int index = spapr->htab_save_index;
    int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    assert(!spapr->htab_first_pass);

    do {
        int chunkstart, invalidstart;

        /* Consume non-dirty HPTEs */
        while ((index < htabslots)
               && !HPTE_DIRTY(HPTE(spapr->htab, index))) {
            index++;
            examined++;
        }

        chunkstart = index;
        /* Consume valid dirty HPTEs */
        while ((index < htabslots)
               && HPTE_DIRTY(HPTE(spapr->htab, index))
               && HPTE_VALID(HPTE(spapr->htab, index))) {
            CLEAN_HPTE(HPTE(spapr->htab, index));
            index++;
            examined++;
        }

        invalidstart = index;
        /* Consume invalid dirty HPTEs */
        while ((index < htabslots)
               && HPTE_DIRTY(HPTE(spapr->htab, index))
               && !HPTE_VALID(HPTE(spapr->htab, index))) {
            CLEAN_HPTE(HPTE(spapr->htab, index));
            index++;
            examined++;
        }

        if (index > chunkstart) {
            int n_valid = invalidstart - chunkstart;
            int n_invalid = index - invalidstart;

            qemu_put_be32(f, chunkstart);
            qemu_put_be16(f, n_valid);
            qemu_put_be16(f, n_invalid);
            qemu_put_buffer(f, HPTE(spapr->htab, chunkstart),
                            HASH_PTE_SIZE_64 * n_valid);
            sent += index - chunkstart;

            if (!final && (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) > max_ns) {
                break;
            }
        }

        if (examined >= htabslots) {
            break;
        }

        if (index >= htabslots) {
            assert(index == htabslots);
            index = 0;
        }
    } while ((examined < htabslots) && (!qemu_file_rate_limit(f) || final));

    if (index >= htabslots) {
        assert(index == htabslots);
        index = 0;
    }

    spapr->htab_save_index = index;

    return (examined >= htabslots) && (sent == 0) ? 1 : 0;
}

#define MAX_ITERATION_NS    5000000 /* 5 ms */
#define MAX_KVM_BUF_SIZE    2048

static int htab_save_iterate(QEMUFile *f, void *opaque)
{
    sPAPREnvironment *spapr = opaque;
    int rc = 0;

    /* Iteration header */
    qemu_put_be32(f, 0);

    if (!spapr->htab) {
        assert(kvm_enabled());

        rc = kvmppc_save_htab(f, spapr->htab_fd,
                              MAX_KVM_BUF_SIZE, MAX_ITERATION_NS);
        if (rc < 0) {
            return rc;
        }
    } else if (spapr->htab_first_pass) {
        htab_save_first_pass(f, spapr, MAX_ITERATION_NS);
    } else {
        rc = htab_save_later_pass(f, spapr, MAX_ITERATION_NS);
    }

    /* End marker */
    qemu_put_be32(f, 0);
    qemu_put_be16(f, 0);
    qemu_put_be16(f, 0);

    return rc;
}

static int htab_save_complete(QEMUFile *f, void *opaque)
{
    sPAPREnvironment *spapr = opaque;

    /* Iteration header */
    qemu_put_be32(f, 0);

    if (!spapr->htab) {
        int rc;

        assert(kvm_enabled());

        rc = kvmppc_save_htab(f, spapr->htab_fd, MAX_KVM_BUF_SIZE, -1);
        if (rc < 0) {
            return rc;
        }
        close(spapr->htab_fd);
        spapr->htab_fd = -1;
    } else {
        htab_save_later_pass(f, spapr, -1);
    }

    /* End marker */
    qemu_put_be32(f, 0);
    qemu_put_be16(f, 0);
    qemu_put_be16(f, 0);

    return 0;
}

static int htab_load(QEMUFile *f, void *opaque, int version_id)
{
    sPAPREnvironment *spapr = opaque;
    uint32_t section_hdr;
    int fd = -1;

    if (version_id < 1 || version_id > 1) {
        fprintf(stderr, "htab_load() bad version\n");
        return -EINVAL;
    }

    section_hdr = qemu_get_be32(f);

    if (section_hdr) {
        /* First section, just the hash shift */
        if (spapr->htab_shift != section_hdr) {
            return -EINVAL;
        }
        return 0;
    }

    if (!spapr->htab) {
        assert(kvm_enabled());

        fd = kvmppc_get_htab_fd(true);
        if (fd < 0) {
            fprintf(stderr, "Unable to open fd to restore KVM hash table: %s\n",
                    strerror(errno));
        }
    }

    while (true) {
        uint32_t index;
        uint16_t n_valid, n_invalid;

        index = qemu_get_be32(f);
        n_valid = qemu_get_be16(f);
        n_invalid = qemu_get_be16(f);

        if ((index == 0) && (n_valid == 0) && (n_invalid == 0)) {
            /* End of Stream */
            break;
        }

        if ((index + n_valid + n_invalid) >
            (HTAB_SIZE(spapr) / HASH_PTE_SIZE_64)) {
            /* Bad index in stream */
            fprintf(stderr, "htab_load() bad index %d (%hd+%hd entries) "
                    "in htab stream (htab_shift=%d)\n", index, n_valid, n_invalid,
                    spapr->htab_shift);
            return -EINVAL;
        }

        if (spapr->htab) {
            if (n_valid) {
                qemu_get_buffer(f, HPTE(spapr->htab, index),
                                HASH_PTE_SIZE_64 * n_valid);
            }
            if (n_invalid) {
                memset(HPTE(spapr->htab, index + n_valid), 0,
                       HASH_PTE_SIZE_64 * n_invalid);
            }
        } else {
            int rc;

            assert(fd >= 0);

            rc = kvmppc_load_htab_chunk(f, fd, index, n_valid, n_invalid);
            if (rc < 0) {
                return rc;
            }
        }
    }

    if (!spapr->htab) {
        assert(fd >= 0);
        close(fd);
    }

    return 0;
}

static SaveVMHandlers savevm_htab_handlers = {
    .save_live_setup = htab_save_setup,
    .save_live_iterate = htab_save_iterate,
    .save_live_complete = htab_save_complete,
    .load_state = htab_load,
};

/* pSeries LPAR / sPAPR hardware init */
static void ppc_spapr_init(QEMUMachineInitArgs *args)
{
    ram_addr_t ram_size = args->ram_size;
    const char *cpu_model = args->cpu_model;
    const char *kernel_filename = args->kernel_filename;
    const char *kernel_cmdline = args->kernel_cmdline;
    const char *initrd_filename = args->initrd_filename;
    const char *boot_device = args->boot_order;
    PowerPCCPU *cpu;
    CPUPPCState *env;
    PCIHostState *phb;
    int i;
    MemoryRegion *sysmem = get_system_memory();
    MemoryRegion *ram = g_new(MemoryRegion, 1);
    hwaddr rma_alloc_size;
    hwaddr node0_size = (nb_numa_nodes > 1) ? node_mem[0] : ram_size;
    uint32_t initrd_base = 0;
    long kernel_size = 0, initrd_size = 0;
    long load_limit, rtas_limit, fw_size;
    bool kernel_le = false;
    char *filename;

    msi_supported = true;

    spapr = g_malloc0(sizeof(*spapr));
    QLIST_INIT(&spapr->phbs);

    cpu_ppc_hypercall = emulate_spapr_hypercall;

    /* Allocate RMA if necessary */
    rma_alloc_size = kvmppc_alloc_rma("ppc_spapr.rma", sysmem);

    if (rma_alloc_size == -1) {
        hw_error("qemu: Unable to create RMA\n");
        exit(1);
    }

    if (rma_alloc_size && (rma_alloc_size < node0_size)) {
        spapr->rma_size = rma_alloc_size;
    } else {
        spapr->rma_size = node0_size;

        /* With KVM, we don't actually know whether KVM supports an
         * unbounded RMA (PR KVM) or is limited by the hash table size
         * (HV KVM using VRMA), so we always assume the latter
         *
         * In that case, we also limit the initial allocations for RTAS
         * etc... to 256M since we have no way to know what the VRMA size
         * is going to be as it depends on the size of the hash table,
         * which isn't determined yet.
         */
        if (kvm_enabled()) {
            spapr->vrma_adjust = 1;
            spapr->rma_size = MIN(spapr->rma_size, 0x10000000);
        }
    }

    if (spapr->rma_size > node0_size) {
        fprintf(stderr, "Error: Numa node 0 has to span the RMA (%#08"HWADDR_PRIx")\n",
                spapr->rma_size);
        exit(1);
    }

    /* We place the device tree and RTAS just below either the top of the RMA,
     * or just below 2GB, whichever is lower, so that it can be
     * processed with 32-bit real mode code if necessary */
    rtas_limit = MIN(spapr->rma_size, 0x80000000);
    spapr->rtas_addr = rtas_limit - RTAS_MAX_SIZE;
    spapr->fdt_addr = spapr->rtas_addr - FDT_MAX_SIZE;
    load_limit = spapr->fdt_addr - FW_OVERHEAD;

    /* We aim for a hash table of size 1/128 the size of RAM.  The
     * normal rule of thumb is 1/64 the size of RAM, but that's much
     * more than needed for the Linux guests we support. */
    spapr->htab_shift = 18; /* Minimum architected size */
    while (spapr->htab_shift <= 46) {
        if ((1ULL << (spapr->htab_shift + 7)) >= ram_size) {
            break;
        }
        spapr->htab_shift++;
    }

    /* Set up Interrupt Controller before we create the VCPUs */
    spapr->icp = xics_system_init(smp_cpus * kvmppc_smt_threads() / smp_threads,
                                  XICS_IRQS);
    spapr->next_irq = XICS_IRQ_BASE;

    /* init CPUs */
    if (cpu_model == NULL) {
        cpu_model = kvm_enabled() ? "host" : "POWER7";
    }
    for (i = 0; i < smp_cpus; i++) {
        cpu = cpu_ppc_init(cpu_model);
        if (cpu == NULL) {
            fprintf(stderr, "Unable to find PowerPC CPU definition\n");
            exit(1);
        }
        env = &cpu->env;

        /* Set time-base frequency to 512 MHz */
        cpu_ppc_tb_init(env, TIMEBASE_FREQ);

        /* PAPR always has exception vectors in RAM not ROM. To ensure this,
         * MSR[IP] should never be set.
         */
        env->msr_mask &= ~(1 << 6);

        /* Tell KVM that we're in PAPR mode */
        if (kvm_enabled()) {
            kvmppc_set_papr(cpu);
        }

        xics_cpu_setup(spapr->icp, cpu);

        qemu_register_reset(spapr_cpu_reset, cpu);
    }

    /* allocate RAM */
    spapr->ram_limit = ram_size;
    if (spapr->ram_limit > rma_alloc_size) {
        ram_addr_t nonrma_base = rma_alloc_size;
        ram_addr_t nonrma_size = spapr->ram_limit - rma_alloc_size;

        memory_region_init_ram(ram, NULL, "ppc_spapr.ram", nonrma_size);
        vmstate_register_ram_global(ram);
        memory_region_add_subregion(sysmem, nonrma_base, ram);
    }

    filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, "spapr-rtas.bin");
    spapr->rtas_size = load_image_targphys(filename, spapr->rtas_addr,
                                           rtas_limit - spapr->rtas_addr);
    if (spapr->rtas_size < 0) {
        hw_error("qemu: could not load LPAR rtas '%s'\n", filename);
        exit(1);
    }
    if (spapr->rtas_size > RTAS_MAX_SIZE) {
        hw_error("RTAS too big ! 0x%lx bytes (max is 0x%x)\n",
                 spapr->rtas_size, RTAS_MAX_SIZE);
        exit(1);
    }
    g_free(filename);

    /* Set up EPOW events infrastructure */
    spapr_events_init(spapr);

    /* Set up VIO bus */
    spapr->vio_bus = spapr_vio_bus_init();

    for (i = 0; i < MAX_SERIAL_PORTS; i++) {
        if (serial_hds[i]) {
            spapr_vty_create(spapr->vio_bus, serial_hds[i]);
        }
    }

    /* We always have at least the nvram device on VIO */
    spapr_create_nvram(spapr);

    /* Set up PCI */
    spapr_pci_msi_init(spapr, SPAPR_PCI_MSI_WINDOW);
    spapr_pci_rtas_init();

    phb = spapr_create_phb(spapr, 0);

    for (i = 0; i < nb_nics; i++) {
        NICInfo *nd = &nd_table[i];

        if (!nd->model) {
            nd->model = g_strdup("ibmveth");
        }

        if (strcmp(nd->model, "ibmveth") == 0) {
            spapr_vlan_create(spapr->vio_bus, nd);
        } else {
            pci_nic_init_nofail(&nd_table[i], phb->bus, nd->model, NULL);
        }
    }

    for (i = 0; i <= drive_get_max_bus(IF_SCSI); i++) {
        spapr_vscsi_create(spapr->vio_bus);
    }

    /* Graphics */
    if (spapr_vga_init(phb->bus)) {
        spapr->has_graphics = true;
    }

    if (usb_enabled(spapr->has_graphics)) {
        pci_create_simple(phb->bus, -1, "pci-ohci");
        if (spapr->has_graphics) {
            usbdevice_create("keyboard");
            usbdevice_create("mouse");
        }
    }

    if (spapr->rma_size < (MIN_RMA_SLOF << 20)) {
        fprintf(stderr, "qemu: pSeries SLOF firmware requires >= "
                "%ldM guest RMA (Real Mode Area memory)\n", MIN_RMA_SLOF);
        exit(1);
    }

    if (kernel_filename) {
        uint64_t lowaddr = 0;

        kernel_size = load_elf(kernel_filename, translate_kernel_address, NULL,
                               NULL, &lowaddr, NULL, 1, ELF_MACHINE, 0);
        if (kernel_size == ELF_LOAD_WRONG_ENDIAN) {
            kernel_size = load_elf(kernel_filename,
                                   translate_kernel_address, NULL,
                                   NULL, &lowaddr, NULL, 0, ELF_MACHINE, 0);
            kernel_le = kernel_size > 0;
        }
        if (kernel_size < 0) {
            fprintf(stderr, "qemu: error loading %s: %s\n",
                    kernel_filename, load_elf_strerror(kernel_size));
            exit(1);
        }

        /* load initrd */
        if (initrd_filename) {
            /* Try to locate the initrd in the gap between the kernel
             * and the firmware. Add a bit of space just in case
             */
            initrd_base = (KERNEL_LOAD_ADDR + kernel_size + 0x1ffff) & ~0xffff;
            initrd_size = load_image_targphys(initrd_filename, initrd_base,
                                              load_limit - initrd_base);
            if (initrd_size < 0) {
                fprintf(stderr, "qemu: could not load initial ram disk '%s'\n",
                        initrd_filename);
                exit(1);
            }
        } else {
            initrd_base = 0;
            initrd_size = 0;
        }
    }

    if (bios_name == NULL) {
        bios_name = FW_FILE_NAME;
    }
    filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
    fw_size = load_image_targphys(filename, 0, FW_MAX_SIZE);
    if (fw_size < 0) {
        hw_error("qemu: could not load LPAR firmware '%s'\n", filename);
        exit(1);
    }
    g_free(filename);

    spapr->entry_point = 0x100;

    vmstate_register(NULL, 0, &vmstate_spapr, spapr);
    register_savevm_live(NULL, "spapr/htab", -1, 1,
                         &savevm_htab_handlers, spapr);

    /* Prepare the device tree */
    spapr->fdt_skel = spapr_create_fdt_skel(initrd_base, initrd_size,
                                            kernel_size, kernel_le,
                                            boot_device, kernel_cmdline,
                                            spapr->epow_irq);
    assert(spapr->fdt_skel != NULL);
}

static int spapr_kvm_type(const char *vm_type)
{
    if (!vm_type) {
        return 0;
    }

    if (!strcmp(vm_type, "HV")) {
        return 1;
    }

    if (!strcmp(vm_type, "PR")) {
        return 2;
    }

    error_report("Unknown kvm-type specified '%s'", vm_type);
    exit(1);
}

static QEMUMachine spapr_machine = {
    .name = "pseries",
    .desc = "pSeries Logical Partition (PAPR compliant)",
    .is_default = 1,
    .init = ppc_spapr_init,
    .reset = ppc_spapr_reset,
    .block_default_type = IF_SCSI,
    .max_cpus = MAX_CPUS,
    .no_parallel = 1,
    .default_boot_order = NULL,
    .kvm_type = spapr_kvm_type,
};

/*
 * Implementation of an interface to adjust firmware path
 * for the bootindex property handling.
 */
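/*
 * For example (hypothetical device): a spapr-vscsi disk at SCSI id 1, LUN 0
 * gets the firmware path component "disk@8100000000000000", because the
 * encoded SRP LUN 0x8000 | (1 << 8) | 0 is placed in the top 16 bits of the
 * 64-bit LUN by the vscsi branch below.
 */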
static char *spapr_get_fw_dev_path(FWPathProvider *p, BusState *bus,
                                   DeviceState *dev)
{
#define CAST(type, obj, name) \
    ((type *)object_dynamic_cast(OBJECT(obj), (name)))
    SCSIDevice *d = CAST(SCSIDevice, dev, TYPE_SCSI_DEVICE);
    sPAPRPHBState *phb = CAST(sPAPRPHBState, dev, TYPE_SPAPR_PCI_HOST_BRIDGE);

    if (d) {
        void *spapr = CAST(void, bus->parent, "spapr-vscsi");
        VirtIOSCSI *virtio = CAST(VirtIOSCSI, bus->parent, TYPE_VIRTIO_SCSI);
        USBDevice *usb = CAST(USBDevice, bus->parent, TYPE_USB_DEVICE);

        if (spapr) {
            /*
             * Replace "channel@0/disk@0,0" with "disk@8000000000000000":
             * We use SRP luns of the form 8000 | (bus << 8) | (id << 5) | lun
             * in the top 16 bits of the 64-bit LUN
             */
            unsigned id = 0x8000 | (d->id << 8) | d->lun;
            return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
                                   (uint64_t)id << 48);
        } else if (virtio) {
            /*
             * We use SRP luns of the form 01000000 | (target << 8) | lun
             * in the top 32 bits of the 64-bit LUN
             * Note: the quote above is from SLOF and it is wrong,
             * the actual binding is:
             *   swap 0100 or 10 << or 20 << ( target lun-id -- srplun )
             */
            unsigned id = 0x1000000 | (d->id << 16) | d->lun;
            return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
                                   (uint64_t)id << 32);
        } else if (usb) {
            /*
             * We use SRP luns of the form 01000000 | (usb-port << 16) | lun
             * in the top 32 bits of the 64-bit LUN
             */
            unsigned usb_port = atoi(usb->port->path);
            unsigned id = 0x1000000 | (usb_port << 16) | d->lun;
            return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
                                   (uint64_t)id << 32);
        }
    }

    if (phb) {
        /* Replace "pci" with "pci@800000020000000" */
        return g_strdup_printf("pci@%"PRIX64, phb->buid);
    }

    return NULL;
}

static void spapr_machine_class_init(ObjectClass *oc, void *data)
{
    MachineClass *mc = MACHINE_CLASS(oc);
    FWPathProviderClass *fwc = FW_PATH_PROVIDER_CLASS(oc);

    mc->qemu_machine = data;
    fwc->get_dev_path = spapr_get_fw_dev_path;
}

static const TypeInfo spapr_machine_info = {
    .name          = TYPE_SPAPR_MACHINE,
    .parent        = TYPE_MACHINE,
    .class_init    = spapr_machine_class_init,
    .class_data    = &spapr_machine,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_FW_PATH_PROVIDER },
        { }
    },
};

static void spapr_machine_register_types(void)
{
    type_register_static(&spapr_machine_info);
}

type_init(spapr_machine_register_types)