1 /* 2 * QEMU sPAPR VIO code 3 * 4 * Copyright (c) 2010 David Gibson, IBM Corporation <dwg@au1.ibm.com> 5 * Based on the s390 virtio bus code: 6 * Copyright (c) 2009 Alexander Graf <agraf@suse.de> 7 * 8 * This library is free software; you can redistribute it and/or 9 * modify it under the terms of the GNU Lesser General Public 10 * License as published by the Free Software Foundation; either 11 * version 2 of the License, or (at your option) any later version. 12 * 13 * This library is distributed in the hope that it will be useful, 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 16 * Lesser General Public License for more details. 17 * 18 * You should have received a copy of the GNU Lesser General Public 19 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 20 */ 21 22 #include "qemu/osdep.h" 23 #include "qemu/error-report.h" 24 #include "qapi/error.h" 25 #include "qapi/visitor.h" 26 #include "hw/hw.h" 27 #include "qemu/log.h" 28 #include "sysemu/sysemu.h" 29 #include "hw/boards.h" 30 #include "hw/loader.h" 31 #include "elf.h" 32 #include "hw/sysbus.h" 33 #include "sysemu/kvm.h" 34 #include "sysemu/device_tree.h" 35 #include "kvm_ppc.h" 36 #include "sysemu/qtest.h" 37 38 #include "hw/ppc/spapr.h" 39 #include "hw/ppc/spapr_vio.h" 40 #include "hw/ppc/fdt.h" 41 #include "trace.h" 42 43 #include <libfdt.h> 44 45 #define SPAPR_VIO_REG_BASE 0x71000000 46 47 static char *spapr_vio_get_dev_name(DeviceState *qdev) 48 { 49 VIOsPAPRDevice *dev = VIO_SPAPR_DEVICE(qdev); 50 VIOsPAPRDeviceClass *pc = VIO_SPAPR_DEVICE_GET_CLASS(dev); 51 52 /* Device tree style name device@reg */ 53 return g_strdup_printf("%s@%x", pc->dt_name, dev->reg); 54 } 55 56 static void spapr_vio_bus_class_init(ObjectClass *klass, void *data) 57 { 58 BusClass *k = BUS_CLASS(klass); 59 60 k->get_dev_path = spapr_vio_get_dev_name; 61 k->get_fw_dev_path = spapr_vio_get_dev_name; 62 } 63 64 static const TypeInfo spapr_vio_bus_info = { 65 .name = TYPE_SPAPR_VIO_BUS, 66 .parent = TYPE_BUS, 67 .class_init = spapr_vio_bus_class_init, 68 .instance_size = sizeof(VIOsPAPRBus), 69 }; 70 71 VIOsPAPRDevice *spapr_vio_find_by_reg(VIOsPAPRBus *bus, uint32_t reg) 72 { 73 BusChild *kid; 74 VIOsPAPRDevice *dev = NULL; 75 76 QTAILQ_FOREACH(kid, &bus->bus.children, sibling) { 77 dev = (VIOsPAPRDevice *)kid->child; 78 if (dev->reg == reg) { 79 return dev; 80 } 81 } 82 83 return NULL; 84 } 85 86 static int vio_make_devnode(VIOsPAPRDevice *dev, 87 void *fdt) 88 { 89 VIOsPAPRDeviceClass *pc = VIO_SPAPR_DEVICE_GET_CLASS(dev); 90 int vdevice_off, node_off, ret; 91 char *dt_name; 92 93 vdevice_off = fdt_path_offset(fdt, "/vdevice"); 94 if (vdevice_off < 0) { 95 return vdevice_off; 96 } 97 98 dt_name = spapr_vio_get_dev_name(DEVICE(dev)); 99 node_off = fdt_add_subnode(fdt, vdevice_off, dt_name); 100 g_free(dt_name); 101 if (node_off < 0) { 102 return node_off; 103 } 104 105 ret = fdt_setprop_cell(fdt, node_off, "reg", dev->reg); 106 if (ret < 0) { 107 return ret; 108 } 109 110 if (pc->dt_type) { 111 ret = fdt_setprop_string(fdt, node_off, "device_type", 112 pc->dt_type); 113 if (ret < 0) { 114 return ret; 115 } 116 } 117 118 if (pc->dt_compatible) { 119 ret = fdt_setprop_string(fdt, node_off, "compatible", 120 pc->dt_compatible); 121 if (ret < 0) { 122 return ret; 123 } 124 } 125 126 if (dev->irq) { 127 uint32_t ints_prop[2]; 128 129 spapr_dt_xics_irq(ints_prop, dev->irq, false); 130 ret = fdt_setprop(fdt, node_off, "interrupts", ints_prop, 131 sizeof(ints_prop)); 132 if (ret < 0) { 133 return ret; 134 } 135 } 136 137 ret = spapr_tcet_dma_dt(fdt, node_off, "ibm,my-dma-window", dev->tcet); 138 if (ret < 0) { 139 return ret; 140 } 141 142 if (pc->devnode) { 143 ret = (pc->devnode)(dev, fdt, node_off); 144 if (ret < 0) { 145 return ret; 146 } 147 } 148 149 return node_off; 150 } 151 152 /* 153 * CRQ handling 154 */ 155 static target_ulong h_reg_crq(PowerPCCPU *cpu, sPAPRMachineState *spapr, 156 target_ulong opcode, target_ulong *args) 157 { 158 target_ulong reg = args[0]; 159 target_ulong queue_addr = args[1]; 160 target_ulong queue_len = args[2]; 161 VIOsPAPRDevice *dev = spapr_vio_find_by_reg(spapr->vio_bus, reg); 162 163 if (!dev) { 164 hcall_dprintf("Unit 0x" TARGET_FMT_lx " does not exist\n", reg); 165 return H_PARAMETER; 166 } 167 168 /* We can't grok a queue size bigger than 256M for now */ 169 if (queue_len < 0x1000 || queue_len > 0x10000000) { 170 hcall_dprintf("Queue size too small or too big (0x" TARGET_FMT_lx 171 ")\n", queue_len); 172 return H_PARAMETER; 173 } 174 175 /* Check queue alignment */ 176 if (queue_addr & 0xfff) { 177 hcall_dprintf("Queue not aligned (0x" TARGET_FMT_lx ")\n", queue_addr); 178 return H_PARAMETER; 179 } 180 181 /* Check if device supports CRQs */ 182 if (!dev->crq.SendFunc) { 183 hcall_dprintf("Device does not support CRQ\n"); 184 return H_NOT_FOUND; 185 } 186 187 /* Already a queue ? */ 188 if (dev->crq.qsize) { 189 hcall_dprintf("CRQ already registered\n"); 190 return H_RESOURCE; 191 } 192 dev->crq.qladdr = queue_addr; 193 dev->crq.qsize = queue_len; 194 dev->crq.qnext = 0; 195 196 trace_spapr_vio_h_reg_crq(reg, queue_addr, queue_len); 197 return H_SUCCESS; 198 } 199 200 static target_ulong free_crq(VIOsPAPRDevice *dev) 201 { 202 dev->crq.qladdr = 0; 203 dev->crq.qsize = 0; 204 dev->crq.qnext = 0; 205 206 trace_spapr_vio_free_crq(dev->reg); 207 208 return H_SUCCESS; 209 } 210 211 static target_ulong h_free_crq(PowerPCCPU *cpu, sPAPRMachineState *spapr, 212 target_ulong opcode, target_ulong *args) 213 { 214 target_ulong reg = args[0]; 215 VIOsPAPRDevice *dev = spapr_vio_find_by_reg(spapr->vio_bus, reg); 216 217 if (!dev) { 218 hcall_dprintf("Unit 0x" TARGET_FMT_lx " does not exist\n", reg); 219 return H_PARAMETER; 220 } 221 222 return free_crq(dev); 223 } 224 225 static target_ulong h_send_crq(PowerPCCPU *cpu, sPAPRMachineState *spapr, 226 target_ulong opcode, target_ulong *args) 227 { 228 target_ulong reg = args[0]; 229 target_ulong msg_hi = args[1]; 230 target_ulong msg_lo = args[2]; 231 VIOsPAPRDevice *dev = spapr_vio_find_by_reg(spapr->vio_bus, reg); 232 uint64_t crq_mangle[2]; 233 234 if (!dev) { 235 hcall_dprintf("Unit 0x" TARGET_FMT_lx " does not exist\n", reg); 236 return H_PARAMETER; 237 } 238 crq_mangle[0] = cpu_to_be64(msg_hi); 239 crq_mangle[1] = cpu_to_be64(msg_lo); 240 241 if (dev->crq.SendFunc) { 242 return dev->crq.SendFunc(dev, (uint8_t *)crq_mangle); 243 } 244 245 return H_HARDWARE; 246 } 247 248 static target_ulong h_enable_crq(PowerPCCPU *cpu, sPAPRMachineState *spapr, 249 target_ulong opcode, target_ulong *args) 250 { 251 target_ulong reg = args[0]; 252 VIOsPAPRDevice *dev = spapr_vio_find_by_reg(spapr->vio_bus, reg); 253 254 if (!dev) { 255 hcall_dprintf("Unit 0x" TARGET_FMT_lx " does not exist\n", reg); 256 return H_PARAMETER; 257 } 258 259 return 0; 260 } 261 262 /* Returns negative error, 0 success, or positive: queue full */ 263 int spapr_vio_send_crq(VIOsPAPRDevice *dev, uint8_t *crq) 264 { 265 int rc; 266 uint8_t byte; 267 268 if (!dev->crq.qsize) { 269 error_report("spapr_vio_send_creq on uninitialized queue"); 270 return -1; 271 } 272 273 /* Maybe do a fast path for KVM just writing to the pages */ 274 rc = spapr_vio_dma_read(dev, dev->crq.qladdr + dev->crq.qnext, &byte, 1); 275 if (rc) { 276 return rc; 277 } 278 if (byte != 0) { 279 return 1; 280 } 281 282 rc = spapr_vio_dma_write(dev, dev->crq.qladdr + dev->crq.qnext + 8, 283 &crq[8], 8); 284 if (rc) { 285 return rc; 286 } 287 288 kvmppc_eieio(); 289 290 rc = spapr_vio_dma_write(dev, dev->crq.qladdr + dev->crq.qnext, crq, 8); 291 if (rc) { 292 return rc; 293 } 294 295 dev->crq.qnext = (dev->crq.qnext + 16) % dev->crq.qsize; 296 297 if (dev->signal_state & 1) { 298 qemu_irq_pulse(spapr_vio_qirq(dev)); 299 } 300 301 return 0; 302 } 303 304 /* "quiesce" handling */ 305 306 static void spapr_vio_quiesce_one(VIOsPAPRDevice *dev) 307 { 308 if (dev->tcet) { 309 device_reset(DEVICE(dev->tcet)); 310 } 311 free_crq(dev); 312 } 313 314 void spapr_vio_set_bypass(VIOsPAPRDevice *dev, bool bypass) 315 { 316 if (!dev->tcet) { 317 return; 318 } 319 320 memory_region_set_enabled(&dev->mrbypass, bypass); 321 memory_region_set_enabled(spapr_tce_get_iommu(dev->tcet), !bypass); 322 323 dev->tcet->bypass = bypass; 324 } 325 326 static void rtas_set_tce_bypass(PowerPCCPU *cpu, sPAPRMachineState *spapr, 327 uint32_t token, 328 uint32_t nargs, target_ulong args, 329 uint32_t nret, target_ulong rets) 330 { 331 VIOsPAPRBus *bus = spapr->vio_bus; 332 VIOsPAPRDevice *dev; 333 uint32_t unit, enable; 334 335 if (nargs != 2) { 336 rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); 337 return; 338 } 339 unit = rtas_ld(args, 0); 340 enable = rtas_ld(args, 1); 341 dev = spapr_vio_find_by_reg(bus, unit); 342 if (!dev) { 343 rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); 344 return; 345 } 346 347 if (!dev->tcet) { 348 rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); 349 return; 350 } 351 352 spapr_vio_set_bypass(dev, !!enable); 353 354 rtas_st(rets, 0, RTAS_OUT_SUCCESS); 355 } 356 357 static void rtas_quiesce(PowerPCCPU *cpu, sPAPRMachineState *spapr, 358 uint32_t token, 359 uint32_t nargs, target_ulong args, 360 uint32_t nret, target_ulong rets) 361 { 362 VIOsPAPRBus *bus = spapr->vio_bus; 363 BusChild *kid; 364 VIOsPAPRDevice *dev = NULL; 365 366 if (nargs != 0) { 367 rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); 368 return; 369 } 370 371 QTAILQ_FOREACH(kid, &bus->bus.children, sibling) { 372 dev = (VIOsPAPRDevice *)kid->child; 373 spapr_vio_quiesce_one(dev); 374 } 375 376 rtas_st(rets, 0, RTAS_OUT_SUCCESS); 377 } 378 379 static VIOsPAPRDevice *reg_conflict(VIOsPAPRDevice *dev) 380 { 381 VIOsPAPRBus *bus = SPAPR_VIO_BUS(dev->qdev.parent_bus); 382 BusChild *kid; 383 VIOsPAPRDevice *other; 384 385 /* 386 * Check for a device other than the given one which is already 387 * using the requested address. We have to open code this because 388 * the given dev might already be in the list. 389 */ 390 QTAILQ_FOREACH(kid, &bus->bus.children, sibling) { 391 other = VIO_SPAPR_DEVICE(kid->child); 392 393 if (other != dev && other->reg == dev->reg) { 394 return other; 395 } 396 } 397 398 return 0; 399 } 400 401 static void spapr_vio_busdev_reset(DeviceState *qdev) 402 { 403 VIOsPAPRDevice *dev = VIO_SPAPR_DEVICE(qdev); 404 VIOsPAPRDeviceClass *pc = VIO_SPAPR_DEVICE_GET_CLASS(dev); 405 406 /* Shut down the request queue and TCEs if necessary */ 407 spapr_vio_quiesce_one(dev); 408 409 dev->signal_state = 0; 410 411 spapr_vio_set_bypass(dev, false); 412 if (pc->reset) { 413 pc->reset(dev); 414 } 415 } 416 417 /* 418 * The register property of a VIO device is defined in livirt using 419 * 0x1000 as a base register number plus a 0x1000 increment. For the 420 * VIO tty device, the base number is changed to 0x30000000. QEMU uses 421 * a base register number of 0x71000000 and then a simple increment. 422 * 423 * The formula below tries to compute a unique index number from the 424 * register value that will be used to define the IRQ number of the 425 * VIO device. 426 * 427 * A maximum of 256 VIO devices is covered. Collisions are possible 428 * but they will be detected when the IRQ is claimed. 429 */ 430 static inline uint32_t spapr_vio_reg_to_irq(uint32_t reg) 431 { 432 uint32_t irq; 433 434 if (reg >= SPAPR_VIO_REG_BASE) { 435 /* 436 * VIO device register values when allocated by QEMU. For 437 * these, we simply mask the high bits to fit the overall 438 * range: [0x00 - 0xff]. 439 * 440 * The nvram VIO device (reg=0x71000000) is a static device of 441 * the pseries machine and so is always allocated by QEMU. Its 442 * IRQ number is 0x0. 443 */ 444 irq = reg & 0xff; 445 446 } else if (reg >= 0x30000000) { 447 /* 448 * VIO tty devices register values, when allocated by livirt, 449 * are mapped in range [0xf0 - 0xff], gives us a maximum of 16 450 * vtys. 451 */ 452 irq = 0xf0 | ((reg >> 12) & 0xf); 453 454 } else { 455 /* 456 * Other VIO devices register values, when allocated by 457 * livirt, should be mapped in range [0x00 - 0xef]. Conflicts 458 * will be detected when IRQ is claimed. 459 */ 460 irq = (reg >> 12) & 0xff; 461 } 462 463 return SPAPR_IRQ_VIO | irq; 464 } 465 466 static void spapr_vio_busdev_realize(DeviceState *qdev, Error **errp) 467 { 468 sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine()); 469 VIOsPAPRDevice *dev = (VIOsPAPRDevice *)qdev; 470 VIOsPAPRDeviceClass *pc = VIO_SPAPR_DEVICE_GET_CLASS(dev); 471 char *id; 472 Error *local_err = NULL; 473 474 if (dev->reg != -1) { 475 /* 476 * Explicitly assigned address, just verify that no-one else 477 * is using it. other mechanism). We have to open code this 478 * rather than using spapr_vio_find_by_reg() because sdev 479 * itself is already in the list. 480 */ 481 VIOsPAPRDevice *other = reg_conflict(dev); 482 483 if (other) { 484 error_setg(errp, "%s and %s devices conflict at address %#x", 485 object_get_typename(OBJECT(qdev)), 486 object_get_typename(OBJECT(&other->qdev)), 487 dev->reg); 488 return; 489 } 490 } else { 491 /* Need to assign an address */ 492 VIOsPAPRBus *bus = SPAPR_VIO_BUS(dev->qdev.parent_bus); 493 494 do { 495 dev->reg = bus->next_reg++; 496 } while (reg_conflict(dev)); 497 } 498 499 /* Don't overwrite ids assigned on the command line */ 500 if (!dev->qdev.id) { 501 id = spapr_vio_get_dev_name(DEVICE(dev)); 502 dev->qdev.id = id; 503 } 504 505 dev->irq = spapr_vio_reg_to_irq(dev->reg); 506 507 if (SPAPR_MACHINE_GET_CLASS(spapr)->legacy_irq_allocation) { 508 dev->irq = spapr_irq_findone(spapr, &local_err); 509 if (local_err) { 510 error_propagate(errp, local_err); 511 return; 512 } 513 } 514 515 spapr_irq_claim(spapr, dev->irq, false, &local_err); 516 if (local_err) { 517 error_propagate(errp, local_err); 518 return; 519 } 520 521 if (pc->rtce_window_size) { 522 uint32_t liobn = SPAPR_VIO_LIOBN(dev->reg); 523 524 memory_region_init(&dev->mrroot, OBJECT(dev), "iommu-spapr-root", 525 ram_size); 526 memory_region_init_alias(&dev->mrbypass, OBJECT(dev), 527 "iommu-spapr-bypass", get_system_memory(), 528 0, ram_size); 529 memory_region_add_subregion_overlap(&dev->mrroot, 0, &dev->mrbypass, 1); 530 address_space_init(&dev->as, &dev->mrroot, qdev->id); 531 532 dev->tcet = spapr_tce_new_table(qdev, liobn); 533 spapr_tce_table_enable(dev->tcet, SPAPR_TCE_PAGE_SHIFT, 0, 534 pc->rtce_window_size >> SPAPR_TCE_PAGE_SHIFT); 535 dev->tcet->vdev = dev; 536 memory_region_add_subregion_overlap(&dev->mrroot, 0, 537 spapr_tce_get_iommu(dev->tcet), 2); 538 } 539 540 pc->realize(dev, errp); 541 } 542 543 static target_ulong h_vio_signal(PowerPCCPU *cpu, sPAPRMachineState *spapr, 544 target_ulong opcode, 545 target_ulong *args) 546 { 547 target_ulong reg = args[0]; 548 target_ulong mode = args[1]; 549 VIOsPAPRDevice *dev = spapr_vio_find_by_reg(spapr->vio_bus, reg); 550 VIOsPAPRDeviceClass *pc; 551 552 if (!dev) { 553 return H_PARAMETER; 554 } 555 556 pc = VIO_SPAPR_DEVICE_GET_CLASS(dev); 557 558 if (mode & ~pc->signal_mask) { 559 return H_PARAMETER; 560 } 561 562 dev->signal_state = mode; 563 564 return H_SUCCESS; 565 } 566 567 VIOsPAPRBus *spapr_vio_bus_init(void) 568 { 569 VIOsPAPRBus *bus; 570 BusState *qbus; 571 DeviceState *dev; 572 573 /* Create bridge device */ 574 dev = qdev_create(NULL, TYPE_SPAPR_VIO_BRIDGE); 575 qdev_init_nofail(dev); 576 577 /* Create bus on bridge device */ 578 qbus = qbus_create(TYPE_SPAPR_VIO_BUS, dev, "spapr-vio"); 579 bus = SPAPR_VIO_BUS(qbus); 580 bus->next_reg = SPAPR_VIO_REG_BASE; 581 582 /* hcall-vio */ 583 spapr_register_hypercall(H_VIO_SIGNAL, h_vio_signal); 584 585 /* hcall-crq */ 586 spapr_register_hypercall(H_REG_CRQ, h_reg_crq); 587 spapr_register_hypercall(H_FREE_CRQ, h_free_crq); 588 spapr_register_hypercall(H_SEND_CRQ, h_send_crq); 589 spapr_register_hypercall(H_ENABLE_CRQ, h_enable_crq); 590 591 /* RTAS calls */ 592 spapr_rtas_register(RTAS_IBM_SET_TCE_BYPASS, "ibm,set-tce-bypass", 593 rtas_set_tce_bypass); 594 spapr_rtas_register(RTAS_QUIESCE, "quiesce", rtas_quiesce); 595 596 return bus; 597 } 598 599 static void spapr_vio_bridge_class_init(ObjectClass *klass, void *data) 600 { 601 DeviceClass *dc = DEVICE_CLASS(klass); 602 603 dc->fw_name = "vdevice"; 604 } 605 606 static const TypeInfo spapr_vio_bridge_info = { 607 .name = TYPE_SPAPR_VIO_BRIDGE, 608 .parent = TYPE_SYS_BUS_DEVICE, 609 .class_init = spapr_vio_bridge_class_init, 610 }; 611 612 const VMStateDescription vmstate_spapr_vio = { 613 .name = "spapr_vio", 614 .version_id = 1, 615 .minimum_version_id = 1, 616 .fields = (VMStateField[]) { 617 /* Sanity check */ 618 VMSTATE_UINT32_EQUAL(reg, VIOsPAPRDevice, NULL), 619 VMSTATE_UINT32_EQUAL(irq, VIOsPAPRDevice, NULL), 620 621 /* General VIO device state */ 622 VMSTATE_UINT64(signal_state, VIOsPAPRDevice), 623 VMSTATE_UINT64(crq.qladdr, VIOsPAPRDevice), 624 VMSTATE_UINT32(crq.qsize, VIOsPAPRDevice), 625 VMSTATE_UINT32(crq.qnext, VIOsPAPRDevice), 626 627 VMSTATE_END_OF_LIST() 628 }, 629 }; 630 631 static void vio_spapr_device_class_init(ObjectClass *klass, void *data) 632 { 633 DeviceClass *k = DEVICE_CLASS(klass); 634 k->realize = spapr_vio_busdev_realize; 635 k->reset = spapr_vio_busdev_reset; 636 k->bus_type = TYPE_SPAPR_VIO_BUS; 637 } 638 639 static const TypeInfo spapr_vio_type_info = { 640 .name = TYPE_VIO_SPAPR_DEVICE, 641 .parent = TYPE_DEVICE, 642 .instance_size = sizeof(VIOsPAPRDevice), 643 .abstract = true, 644 .class_size = sizeof(VIOsPAPRDeviceClass), 645 .class_init = vio_spapr_device_class_init, 646 }; 647 648 static void spapr_vio_register_types(void) 649 { 650 type_register_static(&spapr_vio_bus_info); 651 type_register_static(&spapr_vio_bridge_info); 652 type_register_static(&spapr_vio_type_info); 653 } 654 655 type_init(spapr_vio_register_types) 656 657 static int compare_reg(const void *p1, const void *p2) 658 { 659 VIOsPAPRDevice const *dev1, *dev2; 660 661 dev1 = (VIOsPAPRDevice *)*(DeviceState **)p1; 662 dev2 = (VIOsPAPRDevice *)*(DeviceState **)p2; 663 664 if (dev1->reg < dev2->reg) { 665 return -1; 666 } 667 if (dev1->reg == dev2->reg) { 668 return 0; 669 } 670 671 /* dev1->reg > dev2->reg */ 672 return 1; 673 } 674 675 void spapr_dt_vdevice(VIOsPAPRBus *bus, void *fdt) 676 { 677 DeviceState *qdev, **qdevs; 678 BusChild *kid; 679 int i, num, ret = 0; 680 int node; 681 682 _FDT(node = fdt_add_subnode(fdt, 0, "vdevice")); 683 684 _FDT(fdt_setprop_string(fdt, node, "device_type", "vdevice")); 685 _FDT(fdt_setprop_string(fdt, node, "compatible", "IBM,vdevice")); 686 _FDT(fdt_setprop_cell(fdt, node, "#address-cells", 1)); 687 _FDT(fdt_setprop_cell(fdt, node, "#size-cells", 0)); 688 _FDT(fdt_setprop_cell(fdt, node, "#interrupt-cells", 2)); 689 _FDT(fdt_setprop(fdt, node, "interrupt-controller", NULL, 0)); 690 691 /* Count qdevs on the bus list */ 692 num = 0; 693 QTAILQ_FOREACH(kid, &bus->bus.children, sibling) { 694 num++; 695 } 696 697 /* Copy out into an array of pointers */ 698 qdevs = g_new(DeviceState *, num); 699 num = 0; 700 QTAILQ_FOREACH(kid, &bus->bus.children, sibling) { 701 qdevs[num++] = kid->child; 702 } 703 704 /* Sort the array */ 705 qsort(qdevs, num, sizeof(qdev), compare_reg); 706 707 /* Hack alert. Give the devices to libfdt in reverse order, we happen 708 * to know that will mean they are in forward order in the tree. */ 709 for (i = num - 1; i >= 0; i--) { 710 VIOsPAPRDevice *dev = (VIOsPAPRDevice *)(qdevs[i]); 711 VIOsPAPRDeviceClass *vdc = VIO_SPAPR_DEVICE_GET_CLASS(dev); 712 713 ret = vio_make_devnode(dev, fdt); 714 if (ret < 0) { 715 error_report("Couldn't create device node /vdevice/%s@%"PRIx32, 716 vdc->dt_name, dev->reg); 717 exit(1); 718 } 719 } 720 721 g_free(qdevs); 722 } 723 724 gchar *spapr_vio_stdout_path(VIOsPAPRBus *bus) 725 { 726 VIOsPAPRDevice *dev; 727 char *name, *path; 728 729 dev = spapr_vty_get_default(bus); 730 if (!dev) { 731 return NULL; 732 } 733 734 name = spapr_vio_get_dev_name(DEVICE(dev)); 735 path = g_strdup_printf("/vdevice/%s", name); 736 737 g_free(name); 738 return path; 739 } 740