/*
 * QEMU sPAPR VIO code
 *
 * Copyright (c) 2010 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Based on the s390 virtio bus code:
 * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/log.h"
#include "hw/loader.h"
#include "elf.h"
#include "hw/sysbus.h"
#include "sysemu/kvm.h"
#include "sysemu/device_tree.h"
#include "kvm_ppc.h"
#include "migration/vmstate.h"

#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"
#include "hw/ppc/fdt.h"
#include "trace.h"

#include <libfdt.h>

#define SPAPR_VIO_REG_BASE 0x71000000

static char *spapr_vio_get_dev_name(DeviceState *qdev)
{
    SpaprVioDevice *dev = VIO_SPAPR_DEVICE(qdev);
    SpaprVioDeviceClass *pc = VIO_SPAPR_DEVICE_GET_CLASS(dev);

    /* Device tree style name device@reg */
    return g_strdup_printf("%s@%x", pc->dt_name, dev->reg);
}

static void spapr_vio_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *k = BUS_CLASS(klass);

    k->get_dev_path = spapr_vio_get_dev_name;
    k->get_fw_dev_path = spapr_vio_get_dev_name;
}

static const TypeInfo spapr_vio_bus_info = {
    .name = TYPE_SPAPR_VIO_BUS,
    .parent = TYPE_BUS,
    .class_init = spapr_vio_bus_class_init,
    .instance_size = sizeof(SpaprVioBus),
};

SpaprVioDevice *spapr_vio_find_by_reg(SpaprVioBus *bus, uint32_t reg)
{
    BusChild *kid;
    SpaprVioDevice *dev = NULL;

    QTAILQ_FOREACH(kid, &bus->bus.children, sibling) {
        dev = (SpaprVioDevice *)kid->child;
        if (dev->reg == reg) {
            return dev;
        }
    }

    return NULL;
}

static int vio_make_devnode(SpaprVioDevice *dev,
                            void *fdt)
{
    SpaprVioDeviceClass *pc = VIO_SPAPR_DEVICE_GET_CLASS(dev);
    int vdevice_off, node_off, ret;
    char *dt_name;
    const char *dt_compatible;

    vdevice_off = fdt_path_offset(fdt, "/vdevice");
    if (vdevice_off < 0) {
        return vdevice_off;
    }

    dt_name = spapr_vio_get_dev_name(DEVICE(dev));
    node_off = fdt_add_subnode(fdt, vdevice_off, dt_name);
    g_free(dt_name);
    if (node_off < 0) {
        return node_off;
    }

    ret = fdt_setprop_cell(fdt, node_off, "reg", dev->reg);
    if (ret < 0) {
        return ret;
    }

    if (pc->dt_type) {
        ret = fdt_setprop_string(fdt, node_off, "device_type",
                                 pc->dt_type);
        if (ret < 0) {
            return ret;
        }
    }

    if (pc->get_dt_compatible) {
        dt_compatible = pc->get_dt_compatible(dev);
    } else {
        dt_compatible = pc->dt_compatible;
    }

    if (dt_compatible) {
        ret = fdt_setprop_string(fdt, node_off, "compatible",
                                 dt_compatible);
        if (ret < 0) {
            return ret;
        }
    }

    if (dev->irq) {
        uint32_t ints_prop[2];

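        /*
         * /vdevice uses "#interrupt-cells" = 2, so the "interrupts"
         * property carries an <interrupt-number, trigger-sense> pair.
         * spapr_dt_irq() fills both cells in big-endian byte order;
         * the 'false' marks the source as edge-triggered rather than
         * a level-sensitive LSI.
         */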
        spapr_dt_irq(ints_prop, dev->irq, false);
        ret = fdt_setprop(fdt, node_off, "interrupts", ints_prop,
                          sizeof(ints_prop));
        if (ret < 0) {
            return ret;
        }
    }

    ret = spapr_tcet_dma_dt(fdt, node_off, "ibm,my-dma-window", dev->tcet);
    if (ret < 0) {
        return ret;
    }

    if (pc->devnode) {
        ret = (pc->devnode)(dev, fdt, node_off);
        if (ret < 0) {
            return ret;
        }
    }

    return node_off;
}

/*
 * CRQ handling
 */
static target_ulong h_reg_crq(PowerPCCPU *cpu, SpaprMachineState *spapr,
                              target_ulong opcode, target_ulong *args)
{
    target_ulong reg = args[0];
    target_ulong queue_addr = args[1];
    target_ulong queue_len = args[2];
    SpaprVioDevice *dev = spapr_vio_find_by_reg(spapr->vio_bus, reg);

    if (!dev) {
        hcall_dprintf("Unit 0x" TARGET_FMT_lx " does not exist\n", reg);
        return H_PARAMETER;
    }

    /* We can't grok a queue size bigger than 256M for now */
    if (queue_len < 0x1000 || queue_len > 0x10000000) {
        hcall_dprintf("Queue size too small or too big (0x" TARGET_FMT_lx
                      ")\n", queue_len);
        return H_PARAMETER;
    }

    /* Check queue alignment */
    if (queue_addr & 0xfff) {
        hcall_dprintf("Queue not aligned (0x" TARGET_FMT_lx ")\n", queue_addr);
        return H_PARAMETER;
    }

    /* Check if device supports CRQs */
    if (!dev->crq.SendFunc) {
        hcall_dprintf("Device does not support CRQ\n");
        return H_NOT_FOUND;
    }

    /* Already a queue ? */
    if (dev->crq.qsize) {
        hcall_dprintf("CRQ already registered\n");
        return H_RESOURCE;
    }
    dev->crq.qladdr = queue_addr;
    dev->crq.qsize = queue_len;
    dev->crq.qnext = 0;

    trace_spapr_vio_h_reg_crq(reg, queue_addr, queue_len);
    return H_SUCCESS;
}

static target_ulong free_crq(SpaprVioDevice *dev)
{
    dev->crq.qladdr = 0;
    dev->crq.qsize = 0;
    dev->crq.qnext = 0;

    trace_spapr_vio_free_crq(dev->reg);

    return H_SUCCESS;
}

static target_ulong h_free_crq(PowerPCCPU *cpu, SpaprMachineState *spapr,
                               target_ulong opcode, target_ulong *args)
{
    target_ulong reg = args[0];
    SpaprVioDevice *dev = spapr_vio_find_by_reg(spapr->vio_bus, reg);

    if (!dev) {
        hcall_dprintf("Unit 0x" TARGET_FMT_lx " does not exist\n", reg);
        return H_PARAMETER;
    }

    return free_crq(dev);
}

static target_ulong h_send_crq(PowerPCCPU *cpu, SpaprMachineState *spapr,
                               target_ulong opcode, target_ulong *args)
{
    target_ulong reg = args[0];
    target_ulong msg_hi = args[1];
    target_ulong msg_lo = args[2];
    SpaprVioDevice *dev = spapr_vio_find_by_reg(spapr->vio_bus, reg);
    uint64_t crq_mangle[2];

    if (!dev) {
        hcall_dprintf("Unit 0x" TARGET_FMT_lx " does not exist\n", reg);
        return H_PARAMETER;
    }
    crq_mangle[0] = cpu_to_be64(msg_hi);
    crq_mangle[1] = cpu_to_be64(msg_lo);

    if (dev->crq.SendFunc) {
        return dev->crq.SendFunc(dev, (uint8_t *)crq_mangle);
    }

    return H_HARDWARE;
}

static target_ulong h_enable_crq(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                 target_ulong opcode, target_ulong *args)
{
    target_ulong reg = args[0];
    SpaprVioDevice *dev = spapr_vio_find_by_reg(spapr->vio_bus, reg);

    if (!dev) {
        hcall_dprintf("Unit 0x" TARGET_FMT_lx " does not exist\n", reg);
        return H_PARAMETER;
    }

    return 0;
}

/* Returns negative error, 0 success, or positive: queue full */
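/*
 * Each CRQ entry is 16 bytes and its first byte is the entry header,
 * which the guest is expected to clear once it has consumed the entry.
 * A non-zero header at qnext therefore means that slot is still in use
 * and the queue is full. The payload half is written before the header
 * half, separated by an eieio barrier, so the guest can never observe a
 * valid header in front of stale data.
 */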
int spapr_vio_send_crq(SpaprVioDevice *dev, uint8_t *crq)
{
    int rc;
    uint8_t byte;

    if (!dev->crq.qsize) {
        error_report("spapr_vio_send_crq on uninitialized queue");
        return -1;
    }

    /* Maybe do a fast path for KVM just writing to the pages */
    rc = spapr_vio_dma_read(dev, dev->crq.qladdr + dev->crq.qnext, &byte, 1);
    if (rc) {
        return rc;
    }
    if (byte != 0) {
        return 1;
    }

    rc = spapr_vio_dma_write(dev, dev->crq.qladdr + dev->crq.qnext + 8,
                             &crq[8], 8);
    if (rc) {
        return rc;
    }

    kvmppc_eieio();

    rc = spapr_vio_dma_write(dev, dev->crq.qladdr + dev->crq.qnext, crq, 8);
    if (rc) {
        return rc;
    }

    dev->crq.qnext = (dev->crq.qnext + 16) % dev->crq.qsize;

    if (dev->signal_state & 1) {
        spapr_vio_irq_pulse(dev);
    }

    return 0;
}

/* "quiesce" handling */

static void spapr_vio_quiesce_one(SpaprVioDevice *dev)
{
    if (dev->tcet) {
        device_cold_reset(DEVICE(dev->tcet));
    }
    free_crq(dev);
}

void spapr_vio_set_bypass(SpaprVioDevice *dev, bool bypass)
{
    if (!dev->tcet) {
        return;
    }

    memory_region_set_enabled(&dev->mrbypass, bypass);
    memory_region_set_enabled(spapr_tce_get_iommu(dev->tcet), !bypass);

    dev->tcet->bypass = bypass;
}

static void rtas_set_tce_bypass(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                uint32_t token,
                                uint32_t nargs, target_ulong args,
                                uint32_t nret, target_ulong rets)
{
    SpaprVioBus *bus = spapr->vio_bus;
    SpaprVioDevice *dev;
    uint32_t unit, enable;

    if (nargs != 2) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }
    unit = rtas_ld(args, 0);
    enable = rtas_ld(args, 1);
    dev = spapr_vio_find_by_reg(bus, unit);
    if (!dev) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    if (!dev->tcet) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    spapr_vio_set_bypass(dev, !!enable);

    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
}

static void rtas_quiesce(PowerPCCPU *cpu, SpaprMachineState *spapr,
                         uint32_t token,
                         uint32_t nargs, target_ulong args,
                         uint32_t nret, target_ulong rets)
{
    SpaprVioBus *bus = spapr->vio_bus;
    BusChild *kid;
    SpaprVioDevice *dev = NULL;

    if (nargs != 0) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    QTAILQ_FOREACH(kid, &bus->bus.children, sibling) {
        dev = (SpaprVioDevice *)kid->child;
        spapr_vio_quiesce_one(dev);
    }

    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
}

static SpaprVioDevice *reg_conflict(SpaprVioDevice *dev)
{
    SpaprVioBus *bus = SPAPR_VIO_BUS(dev->qdev.parent_bus);
    BusChild *kid;
    SpaprVioDevice *other;

    /*
     * Check for a device other than the given one which is already
     * using the requested address. We have to open code this because
     * the given dev might already be in the list.
     */
    QTAILQ_FOREACH(kid, &bus->bus.children, sibling) {
        other = VIO_SPAPR_DEVICE(kid->child);

        if (other != dev && other->reg == dev->reg) {
            return other;
        }
    }

    return NULL;
}

static void spapr_vio_busdev_reset(DeviceState *qdev)
{
    SpaprVioDevice *dev = VIO_SPAPR_DEVICE(qdev);
    SpaprVioDeviceClass *pc = VIO_SPAPR_DEVICE_GET_CLASS(dev);

    /* Shut down the request queue and TCEs if necessary */
    spapr_vio_quiesce_one(dev);

    dev->signal_state = 0;

    spapr_vio_set_bypass(dev, false);
    if (pc->reset) {
        pc->reset(dev);
    }
}

/*
 * The register property of a VIO device is defined in libvirt using
 * 0x1000 as a base register number plus a 0x1000 increment. For the
 * VIO tty device, the base number is changed to 0x30000000. QEMU uses
 * a base register number of 0x71000000 and then a simple increment.
 *
 * The formula below tries to compute a unique index number from the
 * register value that will be used to define the IRQ number of the
 * VIO device.
 *
 * A maximum of 256 VIO devices is covered. Collisions are possible
 * but they will be detected when the IRQ is claimed.
 */
static inline uint32_t spapr_vio_reg_to_irq(uint32_t reg)
{
    uint32_t irq;

    if (reg >= SPAPR_VIO_REG_BASE) {
        /*
         * VIO device register values when allocated by QEMU. For
         * these, we simply mask the high bits to fit the overall
         * range: [0x00 - 0xff].
         *
         * The nvram VIO device (reg=0x71000000) is a static device of
         * the pseries machine and so is always allocated by QEMU. Its
         * IRQ number is 0x0.
         */
        irq = reg & 0xff;

    } else if (reg >= 0x30000000) {
        /*
         * VIO tty device register values, when allocated by libvirt,
         * are mapped into the range [0xf0 - 0xff], which gives us a
         * maximum of 16 vtys.
         */
        irq = 0xf0 | ((reg >> 12) & 0xf);

    } else {
        /*
         * Other VIO device register values, when allocated by
         * libvirt, should be mapped into the range [0x00 - 0xef].
         * Conflicts will be detected when the IRQ is claimed.
         */
        irq = (reg >> 12) & 0xff;
    }

    return SPAPR_IRQ_VIO | irq;
}

static void spapr_vio_busdev_realize(DeviceState *qdev, Error **errp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
    SpaprVioDevice *dev = (SpaprVioDevice *)qdev;
    SpaprVioDeviceClass *pc = VIO_SPAPR_DEVICE_GET_CLASS(dev);
    char *id;

    if (dev->reg != -1) {
        /*
         * Explicitly assigned address, just verify that no-one else
         * is using it. We have to open code this rather than using
         * spapr_vio_find_by_reg() because dev itself is already in
         * the list.
         */
        SpaprVioDevice *other = reg_conflict(dev);

        if (other) {
            error_setg(errp, "%s and %s devices conflict at address %#x",
                       object_get_typename(OBJECT(qdev)),
                       object_get_typename(OBJECT(&other->qdev)),
                       dev->reg);
            return;
        }
    } else {
        /* Need to assign an address */
        SpaprVioBus *bus = SPAPR_VIO_BUS(dev->qdev.parent_bus);

        do {
            dev->reg = bus->next_reg++;
        } while (reg_conflict(dev));
    }

    /* Don't overwrite ids assigned on the command line */
    if (!dev->qdev.id) {
        id = spapr_vio_get_dev_name(DEVICE(dev));
        dev->qdev.id = id;
    }

    dev->irq = spapr_vio_reg_to_irq(dev->reg);

    if (SPAPR_MACHINE_GET_CLASS(spapr)->legacy_irq_allocation) {
        int irq = spapr_irq_findone(spapr, errp);

        if (irq < 0) {
            return;
        }
        dev->irq = irq;
    }

    if (spapr_irq_claim(spapr, dev->irq, false, errp) < 0) {
        return;
    }

    if (pc->rtce_window_size) {
        uint32_t liobn = SPAPR_VIO_LIOBN(dev->reg);

        memory_region_init(&dev->mrroot, OBJECT(dev), "iommu-spapr-root",
                           MACHINE(spapr)->ram_size);
        memory_region_init_alias(&dev->mrbypass, OBJECT(dev),
                                 "iommu-spapr-bypass", get_system_memory(),
                                 0, MACHINE(spapr)->ram_size);
        memory_region_add_subregion_overlap(&dev->mrroot, 0, &dev->mrbypass, 1);
        address_space_init(&dev->as, &dev->mrroot, qdev->id);

        dev->tcet = spapr_tce_new_table(qdev, liobn);
        spapr_tce_table_enable(dev->tcet, SPAPR_TCE_PAGE_SHIFT, 0,
                               pc->rtce_window_size >> SPAPR_TCE_PAGE_SHIFT);
        dev->tcet->vdev = dev;
        memory_region_add_subregion_overlap(&dev->mrroot, 0,
                                            spapr_tce_get_iommu(dev->tcet), 2);
    }

    pc->realize(dev, errp);
}

static target_ulong h_vio_signal(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                 target_ulong opcode,
                                 target_ulong *args)
{
    target_ulong reg = args[0];
    target_ulong mode = args[1];
    SpaprVioDevice *dev = spapr_vio_find_by_reg(spapr->vio_bus, reg);
    SpaprVioDeviceClass *pc;

    if (!dev) {
        return H_PARAMETER;
    }

    pc = VIO_SPAPR_DEVICE_GET_CLASS(dev);

    if (mode & ~pc->signal_mask) {
        return H_PARAMETER;
    }

    dev->signal_state = mode;

    return H_SUCCESS;
}

SpaprVioBus *spapr_vio_bus_init(void)
{
    SpaprVioBus *bus;
    BusState *qbus;
    DeviceState *dev;

    /* Create bridge device */
    dev = qdev_new(TYPE_SPAPR_VIO_BRIDGE);
    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);

    /* Create bus on bridge device */
    qbus = qbus_create(TYPE_SPAPR_VIO_BUS, dev, "spapr-vio");
    bus = SPAPR_VIO_BUS(qbus);
    bus->next_reg = SPAPR_VIO_REG_BASE;

    /* hcall-vio */
    spapr_register_hypercall(H_VIO_SIGNAL, h_vio_signal);

    /* hcall-crq */
    spapr_register_hypercall(H_REG_CRQ, h_reg_crq);
    spapr_register_hypercall(H_FREE_CRQ, h_free_crq);
    spapr_register_hypercall(H_SEND_CRQ, h_send_crq);
    spapr_register_hypercall(H_ENABLE_CRQ, h_enable_crq);

    /* RTAS calls */
    spapr_rtas_register(RTAS_IBM_SET_TCE_BYPASS, "ibm,set-tce-bypass",
                        rtas_set_tce_bypass);
    spapr_rtas_register(RTAS_QUIESCE, "quiesce", rtas_quiesce);

    return bus;
}

static void spapr_vio_bridge_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->fw_name = "vdevice";
}

static const TypeInfo spapr_vio_bridge_info = {
    .name = TYPE_SPAPR_VIO_BRIDGE,
    .parent = TYPE_SYS_BUS_DEVICE,
    .class_init = spapr_vio_bridge_class_init,
};

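/*
 * Migration state common to all VIO devices. 'reg' and 'irq' are only
 * sanity checks (the destination must already have matching values);
 * the signal mask and CRQ registration are actually transferred.
 */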
const VMStateDescription vmstate_spapr_vio = {
    .name = "spapr_vio",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        /* Sanity check */
        VMSTATE_UINT32_EQUAL(reg, SpaprVioDevice, NULL),
        VMSTATE_UINT32_EQUAL(irq, SpaprVioDevice, NULL),

        /* General VIO device state */
        VMSTATE_UINT64(signal_state, SpaprVioDevice),
        VMSTATE_UINT64(crq.qladdr, SpaprVioDevice),
        VMSTATE_UINT32(crq.qsize, SpaprVioDevice),
        VMSTATE_UINT32(crq.qnext, SpaprVioDevice),

        VMSTATE_END_OF_LIST()
    },
};

static void vio_spapr_device_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *k = DEVICE_CLASS(klass);
    k->realize = spapr_vio_busdev_realize;
    k->reset = spapr_vio_busdev_reset;
    k->bus_type = TYPE_SPAPR_VIO_BUS;
}

static const TypeInfo spapr_vio_type_info = {
    .name = TYPE_VIO_SPAPR_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(SpaprVioDevice),
    .abstract = true,
    .class_size = sizeof(SpaprVioDeviceClass),
    .class_init = vio_spapr_device_class_init,
};

static void spapr_vio_register_types(void)
{
    type_register_static(&spapr_vio_bus_info);
    type_register_static(&spapr_vio_bridge_info);
    type_register_static(&spapr_vio_type_info);
}

type_init(spapr_vio_register_types)

static int compare_reg(const void *p1, const void *p2)
{
    SpaprVioDevice const *dev1, *dev2;

    dev1 = (SpaprVioDevice *)*(DeviceState **)p1;
    dev2 = (SpaprVioDevice *)*(DeviceState **)p2;

    if (dev1->reg < dev2->reg) {
        return -1;
    }
    if (dev1->reg == dev2->reg) {
        return 0;
    }

    /* dev1->reg > dev2->reg */
    return 1;
}

void spapr_dt_vdevice(SpaprVioBus *bus, void *fdt)
{
    DeviceState *qdev, **qdevs;
    BusChild *kid;
    int i, num, ret = 0;
    int node;

    _FDT(node = fdt_add_subnode(fdt, 0, "vdevice"));

    _FDT(fdt_setprop_string(fdt, node, "device_type", "vdevice"));
    _FDT(fdt_setprop_string(fdt, node, "compatible", "IBM,vdevice"));
    _FDT(fdt_setprop_cell(fdt, node, "#address-cells", 1));
    _FDT(fdt_setprop_cell(fdt, node, "#size-cells", 0));
    _FDT(fdt_setprop_cell(fdt, node, "#interrupt-cells", 2));
    _FDT(fdt_setprop(fdt, node, "interrupt-controller", NULL, 0));

    /* Count qdevs on the bus list */
    num = 0;
    QTAILQ_FOREACH(kid, &bus->bus.children, sibling) {
        num++;
    }

    /* Copy out into an array of pointers */
    qdevs = g_new(DeviceState *, num);
    num = 0;
    QTAILQ_FOREACH(kid, &bus->bus.children, sibling) {
        qdevs[num++] = kid->child;
    }

    /* Sort the array */
    qsort(qdevs, num, sizeof(qdev), compare_reg);

    /* Hack alert. Give the devices to libfdt in reverse order, we happen
     * to know that will mean they are in forward order in the tree. */
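    /*
     * (fdt_add_subnode() places each new child ahead of existing ones,
     * so adding in descending reg order leaves /vdevice sorted in
     * ascending order.)
     */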
    for (i = num - 1; i >= 0; i--) {
        SpaprVioDevice *dev = (SpaprVioDevice *)(qdevs[i]);
        SpaprVioDeviceClass *vdc = VIO_SPAPR_DEVICE_GET_CLASS(dev);

        ret = vio_make_devnode(dev, fdt);
        if (ret < 0) {
            error_report("Couldn't create device node /vdevice/%s@%"PRIx32,
                         vdc->dt_name, dev->reg);
            exit(1);
        }
    }

    g_free(qdevs);
}

gchar *spapr_vio_stdout_path(SpaprVioBus *bus)
{
    SpaprVioDevice *dev;
    char *name, *path;

    dev = spapr_vty_get_default(bus);
    if (!dev) {
        return NULL;
    }

    name = spapr_vio_get_dev_name(DEVICE(dev));
    path = g_strdup_printf("/vdevice/%s", name);

    g_free(name);
    return path;
}