/*
 * QEMU sPAPR VIO code
 *
 * Copyright (c) 2010 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Based on the s390 virtio bus code:
 * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/log.h"
#include "hw/loader.h"
#include "elf.h"
#include "hw/sysbus.h"
#include "sysemu/kvm.h"
#include "sysemu/device_tree.h"
#include "kvm_ppc.h"
#include "migration/vmstate.h"
#include "sysemu/qtest.h"

#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"
#include "hw/ppc/fdt.h"
#include "trace.h"

#include <libfdt.h>

#define SPAPR_VIO_REG_BASE 0x71000000

static char *spapr_vio_get_dev_name(DeviceState *qdev)
{
    SpaprVioDevice *dev = VIO_SPAPR_DEVICE(qdev);
    SpaprVioDeviceClass *pc = VIO_SPAPR_DEVICE_GET_CLASS(dev);

    /* Device tree style name device@reg */
    return g_strdup_printf("%s@%x", pc->dt_name, dev->reg);
}

static void spapr_vio_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *k = BUS_CLASS(klass);

    k->get_dev_path = spapr_vio_get_dev_name;
    k->get_fw_dev_path = spapr_vio_get_dev_name;
}

static const TypeInfo spapr_vio_bus_info = {
    .name = TYPE_SPAPR_VIO_BUS,
    .parent = TYPE_BUS,
    .class_init = spapr_vio_bus_class_init,
    .instance_size = sizeof(SpaprVioBus),
};

SpaprVioDevice *spapr_vio_find_by_reg(SpaprVioBus *bus, uint32_t reg)
{
    BusChild *kid;
    SpaprVioDevice *dev = NULL;

    QTAILQ_FOREACH(kid, &bus->bus.children, sibling) {
        dev = (SpaprVioDevice *)kid->child;
        if (dev->reg == reg) {
            return dev;
        }
    }

    return NULL;
}
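/*
 * Editor's note: an illustrative sketch of the node that
 * vio_make_devnode() below produces under /vdevice.  The device name
 * and the property values are hypothetical; the real ones come from
 * the device class and from spapr_tcet_dma_dt():
 *
 *   v-example@71000002 {
 *       reg = <0x71000002>;              // fdt_setprop_cell("reg", dev->reg)
 *       device_type = "example";         // pc->dt_type, if set
 *       compatible = "IBM,v-example";    // pc->dt_compatible, if set
 *       interrupts = <...>;              // two cells from spapr_dt_irq(), if dev->irq
 *       ibm,my-dma-window = <...>;       // from spapr_tcet_dma_dt()
 *   };
 */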
static int vio_make_devnode(SpaprVioDevice *dev,
                            void *fdt)
{
    SpaprVioDeviceClass *pc = VIO_SPAPR_DEVICE_GET_CLASS(dev);
    int vdevice_off, node_off, ret;
    char *dt_name;

    vdevice_off = fdt_path_offset(fdt, "/vdevice");
    if (vdevice_off < 0) {
        return vdevice_off;
    }

    dt_name = spapr_vio_get_dev_name(DEVICE(dev));
    node_off = fdt_add_subnode(fdt, vdevice_off, dt_name);
    g_free(dt_name);
    if (node_off < 0) {
        return node_off;
    }

    ret = fdt_setprop_cell(fdt, node_off, "reg", dev->reg);
    if (ret < 0) {
        return ret;
    }

    if (pc->dt_type) {
        ret = fdt_setprop_string(fdt, node_off, "device_type",
                                 pc->dt_type);
        if (ret < 0) {
            return ret;
        }
    }

    if (pc->dt_compatible) {
        ret = fdt_setprop_string(fdt, node_off, "compatible",
                                 pc->dt_compatible);
        if (ret < 0) {
            return ret;
        }
    }

    if (dev->irq) {
        uint32_t ints_prop[2];

        spapr_dt_irq(ints_prop, dev->irq, false);
        ret = fdt_setprop(fdt, node_off, "interrupts", ints_prop,
                          sizeof(ints_prop));
        if (ret < 0) {
            return ret;
        }
    }

    ret = spapr_tcet_dma_dt(fdt, node_off, "ibm,my-dma-window", dev->tcet);
    if (ret < 0) {
        return ret;
    }

    if (pc->devnode) {
        ret = (pc->devnode)(dev, fdt, node_off);
        if (ret < 0) {
            return ret;
        }
    }

    return node_off;
}

/*
 * CRQ handling
 */
static target_ulong h_reg_crq(PowerPCCPU *cpu, SpaprMachineState *spapr,
                              target_ulong opcode, target_ulong *args)
{
    target_ulong reg = args[0];
    target_ulong queue_addr = args[1];
    target_ulong queue_len = args[2];
    SpaprVioDevice *dev = spapr_vio_find_by_reg(spapr->vio_bus, reg);

    if (!dev) {
        hcall_dprintf("Unit 0x" TARGET_FMT_lx " does not exist\n", reg);
        return H_PARAMETER;
    }

    /* We can't grok a queue size bigger than 256M for now */
    if (queue_len < 0x1000 || queue_len > 0x10000000) {
        hcall_dprintf("Queue size too small or too big (0x" TARGET_FMT_lx
                      ")\n", queue_len);
        return H_PARAMETER;
    }

    /* Check queue alignment */
    if (queue_addr & 0xfff) {
        hcall_dprintf("Queue not aligned (0x" TARGET_FMT_lx ")\n", queue_addr);
        return H_PARAMETER;
    }

    /* Check if device supports CRQs */
    if (!dev->crq.SendFunc) {
        hcall_dprintf("Device does not support CRQ\n");
        return H_NOT_FOUND;
    }

    /* Already a queue ? */
    if (dev->crq.qsize) {
        hcall_dprintf("CRQ already registered\n");
        return H_RESOURCE;
    }
    dev->crq.qladdr = queue_addr;
    dev->crq.qsize = queue_len;
    dev->crq.qnext = 0;

    trace_spapr_vio_h_reg_crq(reg, queue_addr, queue_len);
    return H_SUCCESS;
}

static target_ulong free_crq(SpaprVioDevice *dev)
{
    dev->crq.qladdr = 0;
    dev->crq.qsize = 0;
    dev->crq.qnext = 0;

    trace_spapr_vio_free_crq(dev->reg);

    return H_SUCCESS;
}

static target_ulong h_free_crq(PowerPCCPU *cpu, SpaprMachineState *spapr,
                               target_ulong opcode, target_ulong *args)
{
    target_ulong reg = args[0];
    SpaprVioDevice *dev = spapr_vio_find_by_reg(spapr->vio_bus, reg);

    if (!dev) {
        hcall_dprintf("Unit 0x" TARGET_FMT_lx " does not exist\n", reg);
        return H_PARAMETER;
    }

    return free_crq(dev);
}

static target_ulong h_send_crq(PowerPCCPU *cpu, SpaprMachineState *spapr,
                               target_ulong opcode, target_ulong *args)
{
    target_ulong reg = args[0];
    target_ulong msg_hi = args[1];
    target_ulong msg_lo = args[2];
    SpaprVioDevice *dev = spapr_vio_find_by_reg(spapr->vio_bus, reg);
    uint64_t crq_mangle[2];

    if (!dev) {
        hcall_dprintf("Unit 0x" TARGET_FMT_lx " does not exist\n", reg);
        return H_PARAMETER;
    }
    crq_mangle[0] = cpu_to_be64(msg_hi);
    crq_mangle[1] = cpu_to_be64(msg_lo);

    if (dev->crq.SendFunc) {
        return dev->crq.SendFunc(dev, (uint8_t *)crq_mangle);
    }

    return H_HARDWARE;
}

static target_ulong h_enable_crq(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                 target_ulong opcode, target_ulong *args)
{
    target_ulong reg = args[0];
    SpaprVioDevice *dev = spapr_vio_find_by_reg(spapr->vio_bus, reg);

    if (!dev) {
        hcall_dprintf("Unit 0x" TARGET_FMT_lx " does not exist\n", reg);
        return H_PARAMETER;
    }

    return H_SUCCESS;
}
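/*
 * Editor's note on the CRQ layout assumed by spapr_vio_send_crq()
 * below: the queue is a guest-supplied, page-aligned buffer of 16-byte
 * entries.  Byte 0 of an entry is the header/valid byte (non-zero
 * means the slot still holds an entry the guest has not consumed), so
 * the payload half of the entry (bytes 8..15) is written first,
 * kvmppc_eieio() orders the stores, and only then is the header half
 * (bytes 0..7) written, so the guest never sees a valid header paired
 * with a stale payload.
 */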
/* Returns negative error, 0 success, or positive: queue full */
int spapr_vio_send_crq(SpaprVioDevice *dev, uint8_t *crq)
{
    int rc;
    uint8_t byte;

    if (!dev->crq.qsize) {
        error_report("spapr_vio_send_crq on uninitialized queue");
        return -1;
    }

    /* Maybe do a fast path for KVM just writing to the pages */
    rc = spapr_vio_dma_read(dev, dev->crq.qladdr + dev->crq.qnext, &byte, 1);
    if (rc) {
        return rc;
    }
    if (byte != 0) {
        return 1;
    }

    rc = spapr_vio_dma_write(dev, dev->crq.qladdr + dev->crq.qnext + 8,
                             &crq[8], 8);
    if (rc) {
        return rc;
    }

    kvmppc_eieio();

    rc = spapr_vio_dma_write(dev, dev->crq.qladdr + dev->crq.qnext, crq, 8);
    if (rc) {
        return rc;
    }

    dev->crq.qnext = (dev->crq.qnext + 16) % dev->crq.qsize;

    if (dev->signal_state & 1) {
        spapr_vio_irq_pulse(dev);
    }

    return 0;
}

/* "quiesce" handling */

static void spapr_vio_quiesce_one(SpaprVioDevice *dev)
{
    if (dev->tcet) {
        device_legacy_reset(DEVICE(dev->tcet));
    }
    free_crq(dev);
}

void spapr_vio_set_bypass(SpaprVioDevice *dev, bool bypass)
{
    if (!dev->tcet) {
        return;
    }

    memory_region_set_enabled(&dev->mrbypass, bypass);
    memory_region_set_enabled(spapr_tce_get_iommu(dev->tcet), !bypass);

    dev->tcet->bypass = bypass;
}

static void rtas_set_tce_bypass(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                uint32_t token,
                                uint32_t nargs, target_ulong args,
                                uint32_t nret, target_ulong rets)
{
    SpaprVioBus *bus = spapr->vio_bus;
    SpaprVioDevice *dev;
    uint32_t unit, enable;

    if (nargs != 2) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }
    unit = rtas_ld(args, 0);
    enable = rtas_ld(args, 1);
    dev = spapr_vio_find_by_reg(bus, unit);
    if (!dev) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    if (!dev->tcet) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    spapr_vio_set_bypass(dev, !!enable);

    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
}

static void rtas_quiesce(PowerPCCPU *cpu, SpaprMachineState *spapr,
                         uint32_t token,
                         uint32_t nargs, target_ulong args,
                         uint32_t nret, target_ulong rets)
{
    SpaprVioBus *bus = spapr->vio_bus;
    BusChild *kid;
    SpaprVioDevice *dev = NULL;

    if (nargs != 0) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    QTAILQ_FOREACH(kid, &bus->bus.children, sibling) {
        dev = (SpaprVioDevice *)kid->child;
        spapr_vio_quiesce_one(dev);
    }

    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
}
static SpaprVioDevice *reg_conflict(SpaprVioDevice *dev)
{
    SpaprVioBus *bus = SPAPR_VIO_BUS(dev->qdev.parent_bus);
    BusChild *kid;
    SpaprVioDevice *other;

    /*
     * Check for a device other than the given one which is already
     * using the requested address.  We have to open code this because
     * the given dev might already be in the list.
     */
    QTAILQ_FOREACH(kid, &bus->bus.children, sibling) {
        other = VIO_SPAPR_DEVICE(kid->child);

        if (other != dev && other->reg == dev->reg) {
            return other;
        }
    }

    return NULL;
}

static void spapr_vio_busdev_reset(DeviceState *qdev)
{
    SpaprVioDevice *dev = VIO_SPAPR_DEVICE(qdev);
    SpaprVioDeviceClass *pc = VIO_SPAPR_DEVICE_GET_CLASS(dev);

    /* Shut down the request queue and TCEs if necessary */
    spapr_vio_quiesce_one(dev);

    dev->signal_state = 0;

    spapr_vio_set_bypass(dev, false);
    if (pc->reset) {
        pc->reset(dev);
    }
}

/*
 * The register property of a VIO device is defined in libvirt using
 * 0x1000 as a base register number plus a 0x1000 increment. For the
 * VIO tty device, the base number is changed to 0x30000000. QEMU uses
 * a base register number of 0x71000000 and then a simple increment.
 *
 * The formula below tries to compute a unique index number from the
 * register value that will be used to define the IRQ number of the
 * VIO device.
 *
 * A maximum of 256 VIO devices is covered. Collisions are possible
 * but they will be detected when the IRQ is claimed.
 */
static inline uint32_t spapr_vio_reg_to_irq(uint32_t reg)
{
    uint32_t irq;

    if (reg >= SPAPR_VIO_REG_BASE) {
        /*
         * VIO device register values when allocated by QEMU. For
         * these, we simply mask the high bits to fit the overall
         * range: [0x00 - 0xff].
         *
         * The nvram VIO device (reg=0x71000000) is a static device of
         * the pseries machine and so is always allocated by QEMU. Its
         * IRQ number is 0x0.
         */
        irq = reg & 0xff;

    } else if (reg >= 0x30000000) {
        /*
         * VIO tty device register values, when allocated by libvirt,
         * are mapped into the range [0xf0 - 0xff], which gives us a
         * maximum of 16 vtys.
         */
        irq = 0xf0 | ((reg >> 12) & 0xf);

    } else {
        /*
         * Other VIO device register values, when allocated by
         * libvirt, should be mapped into the range [0x00 - 0xef].
         * Conflicts will be detected when the IRQ is claimed.
         */
        irq = (reg >> 12) & 0xff;
    }

    return SPAPR_IRQ_VIO | irq;
}
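/*
 * Editor's note: worked examples of the mapping above (every result is
 * then OR'ed with SPAPR_IRQ_VIO).  Register values other than
 * 0x71000000 are illustrative:
 *
 *   reg = 0x71000000 (QEMU-allocated nvram)   -> irq index 0x00
 *   reg = 0x71000003 (QEMU-allocated device)  -> irq index 0x03
 *   reg = 0x30000000 (libvirt vty #0)         -> irq index 0xf0
 *   reg = 0x30001000 (libvirt vty #1)         -> irq index 0xf1
 *   reg = 0x00002000 (libvirt device #2)      -> irq index 0x02
 */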
static void spapr_vio_busdev_realize(DeviceState *qdev, Error **errp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
    SpaprVioDevice *dev = (SpaprVioDevice *)qdev;
    SpaprVioDeviceClass *pc = VIO_SPAPR_DEVICE_GET_CLASS(dev);
    char *id;
    Error *local_err = NULL;

    if (dev->reg != -1) {
        /*
         * Explicitly assigned address, just verify that no-one else
         * is using it.  We have to open code this rather than using
         * spapr_vio_find_by_reg() because the device itself is
         * already in the list.
         */
        SpaprVioDevice *other = reg_conflict(dev);

        if (other) {
            error_setg(errp, "%s and %s devices conflict at address %#x",
                       object_get_typename(OBJECT(qdev)),
                       object_get_typename(OBJECT(&other->qdev)),
                       dev->reg);
            return;
        }
    } else {
        /* Need to assign an address */
        SpaprVioBus *bus = SPAPR_VIO_BUS(dev->qdev.parent_bus);

        do {
            dev->reg = bus->next_reg++;
        } while (reg_conflict(dev));
    }

    /* Don't overwrite ids assigned on the command line */
    if (!dev->qdev.id) {
        id = spapr_vio_get_dev_name(DEVICE(dev));
        dev->qdev.id = id;
    }

    dev->irq = spapr_vio_reg_to_irq(dev->reg);

    if (SPAPR_MACHINE_GET_CLASS(spapr)->legacy_irq_allocation) {
        dev->irq = spapr_irq_findone(spapr, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }

    spapr_irq_claim(spapr, dev->irq, false, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    if (pc->rtce_window_size) {
        uint32_t liobn = SPAPR_VIO_LIOBN(dev->reg);

        memory_region_init(&dev->mrroot, OBJECT(dev), "iommu-spapr-root",
                           ram_size);
        memory_region_init_alias(&dev->mrbypass, OBJECT(dev),
                                 "iommu-spapr-bypass", get_system_memory(),
                                 0, ram_size);
        memory_region_add_subregion_overlap(&dev->mrroot, 0, &dev->mrbypass, 1);
        address_space_init(&dev->as, &dev->mrroot, qdev->id);

        dev->tcet = spapr_tce_new_table(qdev, liobn);
        spapr_tce_table_enable(dev->tcet, SPAPR_TCE_PAGE_SHIFT, 0,
                               pc->rtce_window_size >> SPAPR_TCE_PAGE_SHIFT);
        dev->tcet->vdev = dev;
        memory_region_add_subregion_overlap(&dev->mrroot, 0,
                                            spapr_tce_get_iommu(dev->tcet), 2);
    }

    pc->realize(dev, errp);
}

static target_ulong h_vio_signal(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                 target_ulong opcode,
                                 target_ulong *args)
{
    target_ulong reg = args[0];
    target_ulong mode = args[1];
    SpaprVioDevice *dev = spapr_vio_find_by_reg(spapr->vio_bus, reg);
    SpaprVioDeviceClass *pc;

    if (!dev) {
        return H_PARAMETER;
    }

    pc = VIO_SPAPR_DEVICE_GET_CLASS(dev);

    if (mode & ~pc->signal_mask) {
        return H_PARAMETER;
    }

    dev->signal_state = mode;

    return H_SUCCESS;
}

SpaprVioBus *spapr_vio_bus_init(void)
{
    SpaprVioBus *bus;
    BusState *qbus;
    DeviceState *dev;

    /* Create bridge device */
    dev = qdev_create(NULL, TYPE_SPAPR_VIO_BRIDGE);
    qdev_init_nofail(dev);

    /* Create bus on bridge device */
    qbus = qbus_create(TYPE_SPAPR_VIO_BUS, dev, "spapr-vio");
    bus = SPAPR_VIO_BUS(qbus);
    bus->next_reg = SPAPR_VIO_REG_BASE;

    /* hcall-vio */
    spapr_register_hypercall(H_VIO_SIGNAL, h_vio_signal);

    /* hcall-crq */
    spapr_register_hypercall(H_REG_CRQ, h_reg_crq);
    spapr_register_hypercall(H_FREE_CRQ, h_free_crq);
    spapr_register_hypercall(H_SEND_CRQ, h_send_crq);
    spapr_register_hypercall(H_ENABLE_CRQ, h_enable_crq);

    /* RTAS calls */
    spapr_rtas_register(RTAS_IBM_SET_TCE_BYPASS, "ibm,set-tce-bypass",
                        rtas_set_tce_bypass);
    spapr_rtas_register(RTAS_QUIESCE, "quiesce", rtas_quiesce);

    return bus;
}
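/*
 * Editor's note: a sketch of the expected call flow, based on how the
 * pseries machine is assumed to use this file.  Machine init calls
 * spapr_vio_bus_init() once to create the bridge, the bus and the
 * hypercall/RTAS handlers above; the FDT build path later calls
 * spapr_dt_vdevice() (below) to emit the /vdevice node for the devices
 * realized on the bus.
 */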
static void spapr_vio_bridge_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->fw_name = "vdevice";
}

static const TypeInfo spapr_vio_bridge_info = {
    .name = TYPE_SPAPR_VIO_BRIDGE,
    .parent = TYPE_SYS_BUS_DEVICE,
    .class_init = spapr_vio_bridge_class_init,
};

const VMStateDescription vmstate_spapr_vio = {
    .name = "spapr_vio",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        /* Sanity check */
        VMSTATE_UINT32_EQUAL(reg, SpaprVioDevice, NULL),
        VMSTATE_UINT32_EQUAL(irq, SpaprVioDevice, NULL),

        /* General VIO device state */
        VMSTATE_UINT64(signal_state, SpaprVioDevice),
        VMSTATE_UINT64(crq.qladdr, SpaprVioDevice),
        VMSTATE_UINT32(crq.qsize, SpaprVioDevice),
        VMSTATE_UINT32(crq.qnext, SpaprVioDevice),

        VMSTATE_END_OF_LIST()
    },
};

static void vio_spapr_device_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *k = DEVICE_CLASS(klass);
    k->realize = spapr_vio_busdev_realize;
    k->reset = spapr_vio_busdev_reset;
    k->bus_type = TYPE_SPAPR_VIO_BUS;
}

static const TypeInfo spapr_vio_type_info = {
    .name = TYPE_VIO_SPAPR_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(SpaprVioDevice),
    .abstract = true,
    .class_size = sizeof(SpaprVioDeviceClass),
    .class_init = vio_spapr_device_class_init,
};

static void spapr_vio_register_types(void)
{
    type_register_static(&spapr_vio_bus_info);
    type_register_static(&spapr_vio_bridge_info);
    type_register_static(&spapr_vio_type_info);
}

type_init(spapr_vio_register_types)

static int compare_reg(const void *p1, const void *p2)
{
    SpaprVioDevice const *dev1, *dev2;

    dev1 = (SpaprVioDevice *)*(DeviceState **)p1;
    dev2 = (SpaprVioDevice *)*(DeviceState **)p2;

    if (dev1->reg < dev2->reg) {
        return -1;
    }
    if (dev1->reg == dev2->reg) {
        return 0;
    }

    /* dev1->reg > dev2->reg */
    return 1;
}
void spapr_dt_vdevice(SpaprVioBus *bus, void *fdt)
{
    DeviceState *qdev, **qdevs;
    BusChild *kid;
    int i, num, ret = 0;
    int node;

    _FDT(node = fdt_add_subnode(fdt, 0, "vdevice"));

    _FDT(fdt_setprop_string(fdt, node, "device_type", "vdevice"));
    _FDT(fdt_setprop_string(fdt, node, "compatible", "IBM,vdevice"));
    _FDT(fdt_setprop_cell(fdt, node, "#address-cells", 1));
    _FDT(fdt_setprop_cell(fdt, node, "#size-cells", 0));
    _FDT(fdt_setprop_cell(fdt, node, "#interrupt-cells", 2));
    _FDT(fdt_setprop(fdt, node, "interrupt-controller", NULL, 0));

    /* Count qdevs on the bus list */
    num = 0;
    QTAILQ_FOREACH(kid, &bus->bus.children, sibling) {
        num++;
    }

    /* Copy out into an array of pointers */
    qdevs = g_new(DeviceState *, num);
    num = 0;
    QTAILQ_FOREACH(kid, &bus->bus.children, sibling) {
        qdevs[num++] = kid->child;
    }

    /* Sort the array */
    qsort(qdevs, num, sizeof(qdev), compare_reg);

    /*
     * Hack alert. Give the devices to libfdt in reverse order; we
     * happen to know that will mean they are in forward order in the
     * tree.
     */
    for (i = num - 1; i >= 0; i--) {
        SpaprVioDevice *dev = (SpaprVioDevice *)(qdevs[i]);
        SpaprVioDeviceClass *vdc = VIO_SPAPR_DEVICE_GET_CLASS(dev);

        ret = vio_make_devnode(dev, fdt);
        if (ret < 0) {
            error_report("Couldn't create device node /vdevice/%s@%"PRIx32,
                         vdc->dt_name, dev->reg);
            exit(1);
        }
    }

    g_free(qdevs);
}

gchar *spapr_vio_stdout_path(SpaprVioBus *bus)
{
    SpaprVioDevice *dev;
    char *name, *path;

    dev = spapr_vty_get_default(bus);
    if (!dev) {
        return NULL;
    }

    name = spapr_vio_get_dev_name(DEVICE(dev));
    path = g_strdup_printf("/vdevice/%s", name);

    g_free(name);
    return path;
}
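/*
 * Editor's note: an illustrative sketch (not part of this file) of how
 * a concrete VIO device is expected to plug into the hooks used above.
 * All names prefixed with "my_" are hypothetical; the class fields are
 * the ones this file consumes.
 *
 *   static void my_vio_class_init(ObjectClass *klass, void *data)
 *   {
 *       SpaprVioDeviceClass *k = VIO_SPAPR_DEVICE_CLASS(klass);
 *
 *       k->realize = my_vio_realize;       // called by spapr_vio_busdev_realize()
 *       k->reset = my_vio_reset;           // called by spapr_vio_busdev_reset()
 *       k->devnode = my_vio_devnode;       // extra properties in vio_make_devnode()
 *       k->dt_name = "v-my-dev";           // /vdevice node name: v-my-dev@<reg>
 *       k->dt_type = "my-dev";             // "device_type" property
 *       k->dt_compatible = "IBM,my-dev";   // "compatible" property
 *       k->signal_mask = 0x00000001;       // modes accepted by h_vio_signal()
 *       k->rtce_window_size = 0x10000000;  // non-zero enables the TCE DMA window
 *   }
 *
 * A CRQ-capable device would also set dev->crq.SendFunc in its realize
 * hook so that h_reg_crq()/h_send_crq() accept the queue.
 */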