1 /* 2 * QEMU sPAPR PCI host originated from Uninorth PCI host 3 * 4 * Copyright (c) 2011 Alexey Kardashevskiy, IBM Corporation. 5 * Copyright (C) 2011 David Gibson, IBM Corporation. 6 * 7 * Permission is hereby granted, free of charge, to any person obtaining a copy 8 * of this software and associated documentation files (the "Software"), to deal 9 * in the Software without restriction, including without limitation the rights 10 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 11 * copies of the Software, and to permit persons to whom the Software is 12 * furnished to do so, subject to the following conditions: 13 * 14 * The above copyright notice and this permission notice shall be included in 15 * all copies or substantial portions of the Software. 16 * 17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 23 * THE SOFTWARE. 24 */ 25 #include "qemu/osdep.h" 26 #include "qapi/error.h" 27 #include "qemu-common.h" 28 #include "cpu.h" 29 #include "hw/hw.h" 30 #include "hw/sysbus.h" 31 #include "hw/pci/pci.h" 32 #include "hw/pci/msi.h" 33 #include "hw/pci/msix.h" 34 #include "hw/pci/pci_host.h" 35 #include "hw/ppc/spapr.h" 36 #include "hw/pci-host/spapr.h" 37 #include "exec/address-spaces.h" 38 #include "exec/ram_addr.h" 39 #include <libfdt.h> 40 #include "trace.h" 41 #include "qemu/error-report.h" 42 #include "qapi/qmp/qerror.h" 43 #include "hw/ppc/fdt.h" 44 #include "hw/pci/pci_bridge.h" 45 #include "hw/pci/pci_bus.h" 46 #include "hw/pci/pci_ids.h" 47 #include "hw/ppc/spapr_drc.h" 48 #include "sysemu/device_tree.h" 49 #include "sysemu/kvm.h" 50 #include "sysemu/hostmem.h" 51 #include "sysemu/numa.h" 52 53 /* Copied from the kernel arch/powerpc/platforms/pseries/msi.c */ 54 #define RTAS_QUERY_FN 0 55 #define RTAS_CHANGE_FN 1 56 #define RTAS_RESET_FN 2 57 #define RTAS_CHANGE_MSI_FN 3 58 #define RTAS_CHANGE_MSIX_FN 4 59 60 /* Interrupt types to return on RTAS_CHANGE_* */ 61 #define RTAS_TYPE_MSI 1 62 #define RTAS_TYPE_MSIX 2 63 64 sPAPRPHBState *spapr_pci_find_phb(sPAPRMachineState *spapr, uint64_t buid) 65 { 66 sPAPRPHBState *sphb; 67 68 QLIST_FOREACH(sphb, &spapr->phbs, list) { 69 if (sphb->buid != buid) { 70 continue; 71 } 72 return sphb; 73 } 74 75 return NULL; 76 } 77 78 PCIDevice *spapr_pci_find_dev(sPAPRMachineState *spapr, uint64_t buid, 79 uint32_t config_addr) 80 { 81 sPAPRPHBState *sphb = spapr_pci_find_phb(spapr, buid); 82 PCIHostState *phb = PCI_HOST_BRIDGE(sphb); 83 int bus_num = (config_addr >> 16) & 0xFF; 84 int devfn = (config_addr >> 8) & 0xFF; 85 86 if (!phb) { 87 return NULL; 88 } 89 90 return pci_find_device(phb->bus, bus_num, devfn); 91 } 92 93 static uint32_t rtas_pci_cfgaddr(uint32_t arg) 94 { 95 /* This handles the encoding of extended config space addresses */ 96 return ((arg >> 20) & 0xf00) | (arg & 0xff); 97 } 98 99 static void finish_read_pci_config(sPAPRMachineState *spapr, uint64_t buid, 100 uint32_t addr, uint32_t size, 101 target_ulong rets) 102 { 103 PCIDevice *pci_dev; 104 uint32_t val; 105 106 if ((size != 1) && (size != 2) && (size != 4)) { 107 /* access must be 1, 2 or 4 bytes */ 108 rtas_st(rets, 0, RTAS_OUT_HW_ERROR); 109 return; 110 } 111 112 pci_dev = spapr_pci_find_dev(spapr, buid, addr); 113 addr = rtas_pci_cfgaddr(addr); 114 115 if (!pci_dev || (addr % size) || (addr >= pci_config_size(pci_dev))) { 116 /* Access must be to a valid device, within bounds and 117 * naturally aligned */ 118 rtas_st(rets, 0, RTAS_OUT_HW_ERROR); 119 return; 120 } 121 122 val = pci_host_config_read_common(pci_dev, addr, 123 pci_config_size(pci_dev), size); 124 125 rtas_st(rets, 0, RTAS_OUT_SUCCESS); 126 rtas_st(rets, 1, val); 127 } 128 129 static void rtas_ibm_read_pci_config(PowerPCCPU *cpu, sPAPRMachineState *spapr, 130 uint32_t token, uint32_t nargs, 131 target_ulong args, 132 uint32_t nret, target_ulong rets) 133 { 134 uint64_t buid; 135 uint32_t size, addr; 136 137 if ((nargs != 4) || (nret != 2)) { 138 rtas_st(rets, 0, RTAS_OUT_HW_ERROR); 139 return; 140 } 141 142 buid = rtas_ldq(args, 1); 143 size = rtas_ld(args, 3); 144 addr = rtas_ld(args, 0); 145 146 finish_read_pci_config(spapr, buid, addr, size, rets); 147 } 148 149 static void rtas_read_pci_config(PowerPCCPU *cpu, sPAPRMachineState *spapr, 150 uint32_t token, uint32_t nargs, 151 target_ulong args, 152 uint32_t nret, target_ulong rets) 153 { 154 uint32_t size, addr; 155 156 if ((nargs != 2) || (nret != 2)) { 157 rtas_st(rets, 0, RTAS_OUT_HW_ERROR); 158 return; 159 } 160 161 size = rtas_ld(args, 1); 162 addr = rtas_ld(args, 0); 163 164 finish_read_pci_config(spapr, 0, addr, size, rets); 165 } 166 167 static void finish_write_pci_config(sPAPRMachineState *spapr, uint64_t buid, 168 uint32_t addr, uint32_t size, 169 uint32_t val, target_ulong rets) 170 { 171 PCIDevice *pci_dev; 172 173 if ((size != 1) && (size != 2) && (size != 4)) { 174 /* access must be 1, 2 or 4 bytes */ 175 rtas_st(rets, 0, RTAS_OUT_HW_ERROR); 176 return; 177 } 178 179 pci_dev = spapr_pci_find_dev(spapr, buid, addr); 180 addr = rtas_pci_cfgaddr(addr); 181 182 if (!pci_dev || (addr % size) || (addr >= pci_config_size(pci_dev))) { 183 /* Access must be to a valid device, within bounds and 184 * naturally aligned */ 185 rtas_st(rets, 0, RTAS_OUT_HW_ERROR); 186 return; 187 } 188 189 pci_host_config_write_common(pci_dev, addr, pci_config_size(pci_dev), 190 val, size); 191 192 rtas_st(rets, 0, RTAS_OUT_SUCCESS); 193 } 194 195 static void rtas_ibm_write_pci_config(PowerPCCPU *cpu, sPAPRMachineState *spapr, 196 uint32_t token, uint32_t nargs, 197 target_ulong args, 198 uint32_t nret, target_ulong rets) 199 { 200 uint64_t buid; 201 uint32_t val, size, addr; 202 203 if ((nargs != 5) || (nret != 1)) { 204 rtas_st(rets, 0, RTAS_OUT_HW_ERROR); 205 return; 206 } 207 208 buid = rtas_ldq(args, 1); 209 val = rtas_ld(args, 4); 210 size = rtas_ld(args, 3); 211 addr = rtas_ld(args, 0); 212 213 finish_write_pci_config(spapr, buid, addr, size, val, rets); 214 } 215 216 static void rtas_write_pci_config(PowerPCCPU *cpu, sPAPRMachineState *spapr, 217 uint32_t token, uint32_t nargs, 218 target_ulong args, 219 uint32_t nret, target_ulong rets) 220 { 221 uint32_t val, size, addr; 222 223 if ((nargs != 3) || (nret != 1)) { 224 rtas_st(rets, 0, RTAS_OUT_HW_ERROR); 225 return; 226 } 227 228 229 val = rtas_ld(args, 2); 230 size = rtas_ld(args, 1); 231 addr = rtas_ld(args, 0); 232 233 finish_write_pci_config(spapr, 0, addr, size, val, rets); 234 } 235 236 /* 237 * Set MSI/MSIX message data. 238 * This is required for msi_notify()/msix_notify() which 239 * will write at the addresses via spapr_msi_write(). 240 * 241 * If hwaddr == 0, all entries will have .data == first_irq i.e. 242 * table will be reset. 243 */ 244 static void spapr_msi_setmsg(PCIDevice *pdev, hwaddr addr, bool msix, 245 unsigned first_irq, unsigned req_num) 246 { 247 unsigned i; 248 MSIMessage msg = { .address = addr, .data = first_irq }; 249 250 if (!msix) { 251 msi_set_message(pdev, msg); 252 trace_spapr_pci_msi_setup(pdev->name, 0, msg.address); 253 return; 254 } 255 256 for (i = 0; i < req_num; ++i) { 257 msix_set_message(pdev, i, msg); 258 trace_spapr_pci_msi_setup(pdev->name, i, msg.address); 259 if (addr) { 260 ++msg.data; 261 } 262 } 263 } 264 265 static void rtas_ibm_change_msi(PowerPCCPU *cpu, sPAPRMachineState *spapr, 266 uint32_t token, uint32_t nargs, 267 target_ulong args, uint32_t nret, 268 target_ulong rets) 269 { 270 uint32_t config_addr = rtas_ld(args, 0); 271 uint64_t buid = rtas_ldq(args, 1); 272 unsigned int func = rtas_ld(args, 3); 273 unsigned int req_num = rtas_ld(args, 4); /* 0 == remove all */ 274 unsigned int seq_num = rtas_ld(args, 5); 275 unsigned int ret_intr_type; 276 unsigned int irq, max_irqs = 0; 277 sPAPRPHBState *phb = NULL; 278 PCIDevice *pdev = NULL; 279 spapr_pci_msi *msi; 280 int *config_addr_key; 281 Error *err = NULL; 282 283 /* Fins sPAPRPHBState */ 284 phb = spapr_pci_find_phb(spapr, buid); 285 if (phb) { 286 pdev = spapr_pci_find_dev(spapr, buid, config_addr); 287 } 288 if (!phb || !pdev) { 289 rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); 290 return; 291 } 292 293 switch (func) { 294 case RTAS_CHANGE_FN: 295 if (msi_present(pdev)) { 296 ret_intr_type = RTAS_TYPE_MSI; 297 } else if (msix_present(pdev)) { 298 ret_intr_type = RTAS_TYPE_MSIX; 299 } else { 300 rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); 301 return; 302 } 303 break; 304 case RTAS_CHANGE_MSI_FN: 305 if (msi_present(pdev)) { 306 ret_intr_type = RTAS_TYPE_MSI; 307 } else { 308 rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); 309 return; 310 } 311 break; 312 case RTAS_CHANGE_MSIX_FN: 313 if (msix_present(pdev)) { 314 ret_intr_type = RTAS_TYPE_MSIX; 315 } else { 316 rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); 317 return; 318 } 319 break; 320 default: 321 error_report("rtas_ibm_change_msi(%u) is not implemented", func); 322 rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); 323 return; 324 } 325 326 msi = (spapr_pci_msi *) g_hash_table_lookup(phb->msi, &config_addr); 327 328 /* Releasing MSIs */ 329 if (!req_num) { 330 if (!msi) { 331 trace_spapr_pci_msi("Releasing wrong config", config_addr); 332 rtas_st(rets, 0, RTAS_OUT_HW_ERROR); 333 return; 334 } 335 336 spapr_irq_free(spapr, msi->first_irq, msi->num); 337 if (msi_present(pdev)) { 338 spapr_msi_setmsg(pdev, 0, false, 0, 0); 339 } 340 if (msix_present(pdev)) { 341 spapr_msi_setmsg(pdev, 0, true, 0, 0); 342 } 343 g_hash_table_remove(phb->msi, &config_addr); 344 345 trace_spapr_pci_msi("Released MSIs", config_addr); 346 rtas_st(rets, 0, RTAS_OUT_SUCCESS); 347 rtas_st(rets, 1, 0); 348 return; 349 } 350 351 /* Enabling MSI */ 352 353 /* Check if the device supports as many IRQs as requested */ 354 if (ret_intr_type == RTAS_TYPE_MSI) { 355 max_irqs = msi_nr_vectors_allocated(pdev); 356 } else if (ret_intr_type == RTAS_TYPE_MSIX) { 357 max_irqs = pdev->msix_entries_nr; 358 } 359 if (!max_irqs) { 360 error_report("Requested interrupt type %d is not enabled for device %x", 361 ret_intr_type, config_addr); 362 rtas_st(rets, 0, -1); /* Hardware error */ 363 return; 364 } 365 /* Correct the number if the guest asked for too many */ 366 if (req_num > max_irqs) { 367 trace_spapr_pci_msi_retry(config_addr, req_num, max_irqs); 368 req_num = max_irqs; 369 irq = 0; /* to avoid misleading trace */ 370 goto out; 371 } 372 373 /* Allocate MSIs */ 374 irq = spapr_irq_alloc_block(spapr, req_num, false, 375 ret_intr_type == RTAS_TYPE_MSI, &err); 376 if (err) { 377 error_reportf_err(err, "Can't allocate MSIs for device %x: ", 378 config_addr); 379 rtas_st(rets, 0, RTAS_OUT_HW_ERROR); 380 return; 381 } 382 383 /* Release previous MSIs */ 384 if (msi) { 385 spapr_irq_free(spapr, msi->first_irq, msi->num); 386 g_hash_table_remove(phb->msi, &config_addr); 387 } 388 389 /* Setup MSI/MSIX vectors in the device (via cfgspace or MSIX BAR) */ 390 spapr_msi_setmsg(pdev, SPAPR_PCI_MSI_WINDOW, ret_intr_type == RTAS_TYPE_MSIX, 391 irq, req_num); 392 393 /* Add MSI device to cache */ 394 msi = g_new(spapr_pci_msi, 1); 395 msi->first_irq = irq; 396 msi->num = req_num; 397 config_addr_key = g_new(int, 1); 398 *config_addr_key = config_addr; 399 g_hash_table_insert(phb->msi, config_addr_key, msi); 400 401 out: 402 rtas_st(rets, 0, RTAS_OUT_SUCCESS); 403 rtas_st(rets, 1, req_num); 404 rtas_st(rets, 2, ++seq_num); 405 if (nret > 3) { 406 rtas_st(rets, 3, ret_intr_type); 407 } 408 409 trace_spapr_pci_rtas_ibm_change_msi(config_addr, func, req_num, irq); 410 } 411 412 static void rtas_ibm_query_interrupt_source_number(PowerPCCPU *cpu, 413 sPAPRMachineState *spapr, 414 uint32_t token, 415 uint32_t nargs, 416 target_ulong args, 417 uint32_t nret, 418 target_ulong rets) 419 { 420 uint32_t config_addr = rtas_ld(args, 0); 421 uint64_t buid = rtas_ldq(args, 1); 422 unsigned int intr_src_num = -1, ioa_intr_num = rtas_ld(args, 3); 423 sPAPRPHBState *phb = NULL; 424 PCIDevice *pdev = NULL; 425 spapr_pci_msi *msi; 426 427 /* Find sPAPRPHBState */ 428 phb = spapr_pci_find_phb(spapr, buid); 429 if (phb) { 430 pdev = spapr_pci_find_dev(spapr, buid, config_addr); 431 } 432 if (!phb || !pdev) { 433 rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); 434 return; 435 } 436 437 /* Find device descriptor and start IRQ */ 438 msi = (spapr_pci_msi *) g_hash_table_lookup(phb->msi, &config_addr); 439 if (!msi || !msi->first_irq || !msi->num || (ioa_intr_num >= msi->num)) { 440 trace_spapr_pci_msi("Failed to return vector", config_addr); 441 rtas_st(rets, 0, RTAS_OUT_HW_ERROR); 442 return; 443 } 444 intr_src_num = msi->first_irq + ioa_intr_num; 445 trace_spapr_pci_rtas_ibm_query_interrupt_source_number(ioa_intr_num, 446 intr_src_num); 447 448 rtas_st(rets, 0, RTAS_OUT_SUCCESS); 449 rtas_st(rets, 1, intr_src_num); 450 rtas_st(rets, 2, 1);/* 0 == level; 1 == edge */ 451 } 452 453 static void rtas_ibm_set_eeh_option(PowerPCCPU *cpu, 454 sPAPRMachineState *spapr, 455 uint32_t token, uint32_t nargs, 456 target_ulong args, uint32_t nret, 457 target_ulong rets) 458 { 459 sPAPRPHBState *sphb; 460 uint32_t addr, option; 461 uint64_t buid; 462 int ret; 463 464 if ((nargs != 4) || (nret != 1)) { 465 goto param_error_exit; 466 } 467 468 buid = rtas_ldq(args, 1); 469 addr = rtas_ld(args, 0); 470 option = rtas_ld(args, 3); 471 472 sphb = spapr_pci_find_phb(spapr, buid); 473 if (!sphb) { 474 goto param_error_exit; 475 } 476 477 if (!spapr_phb_eeh_available(sphb)) { 478 goto param_error_exit; 479 } 480 481 ret = spapr_phb_vfio_eeh_set_option(sphb, addr, option); 482 rtas_st(rets, 0, ret); 483 return; 484 485 param_error_exit: 486 rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); 487 } 488 489 static void rtas_ibm_get_config_addr_info2(PowerPCCPU *cpu, 490 sPAPRMachineState *spapr, 491 uint32_t token, uint32_t nargs, 492 target_ulong args, uint32_t nret, 493 target_ulong rets) 494 { 495 sPAPRPHBState *sphb; 496 PCIDevice *pdev; 497 uint32_t addr, option; 498 uint64_t buid; 499 500 if ((nargs != 4) || (nret != 2)) { 501 goto param_error_exit; 502 } 503 504 buid = rtas_ldq(args, 1); 505 sphb = spapr_pci_find_phb(spapr, buid); 506 if (!sphb) { 507 goto param_error_exit; 508 } 509 510 if (!spapr_phb_eeh_available(sphb)) { 511 goto param_error_exit; 512 } 513 514 /* 515 * We always have PE address of form "00BB0001". "BB" 516 * represents the bus number of PE's primary bus. 517 */ 518 option = rtas_ld(args, 3); 519 switch (option) { 520 case RTAS_GET_PE_ADDR: 521 addr = rtas_ld(args, 0); 522 pdev = spapr_pci_find_dev(spapr, buid, addr); 523 if (!pdev) { 524 goto param_error_exit; 525 } 526 527 rtas_st(rets, 1, (pci_bus_num(pci_get_bus(pdev)) << 16) + 1); 528 break; 529 case RTAS_GET_PE_MODE: 530 rtas_st(rets, 1, RTAS_PE_MODE_SHARED); 531 break; 532 default: 533 goto param_error_exit; 534 } 535 536 rtas_st(rets, 0, RTAS_OUT_SUCCESS); 537 return; 538 539 param_error_exit: 540 rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); 541 } 542 543 static void rtas_ibm_read_slot_reset_state2(PowerPCCPU *cpu, 544 sPAPRMachineState *spapr, 545 uint32_t token, uint32_t nargs, 546 target_ulong args, uint32_t nret, 547 target_ulong rets) 548 { 549 sPAPRPHBState *sphb; 550 uint64_t buid; 551 int state, ret; 552 553 if ((nargs != 3) || (nret != 4 && nret != 5)) { 554 goto param_error_exit; 555 } 556 557 buid = rtas_ldq(args, 1); 558 sphb = spapr_pci_find_phb(spapr, buid); 559 if (!sphb) { 560 goto param_error_exit; 561 } 562 563 if (!spapr_phb_eeh_available(sphb)) { 564 goto param_error_exit; 565 } 566 567 ret = spapr_phb_vfio_eeh_get_state(sphb, &state); 568 rtas_st(rets, 0, ret); 569 if (ret != RTAS_OUT_SUCCESS) { 570 return; 571 } 572 573 rtas_st(rets, 1, state); 574 rtas_st(rets, 2, RTAS_EEH_SUPPORT); 575 rtas_st(rets, 3, RTAS_EEH_PE_UNAVAIL_INFO); 576 if (nret >= 5) { 577 rtas_st(rets, 4, RTAS_EEH_PE_RECOVER_INFO); 578 } 579 return; 580 581 param_error_exit: 582 rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); 583 } 584 585 static void rtas_ibm_set_slot_reset(PowerPCCPU *cpu, 586 sPAPRMachineState *spapr, 587 uint32_t token, uint32_t nargs, 588 target_ulong args, uint32_t nret, 589 target_ulong rets) 590 { 591 sPAPRPHBState *sphb; 592 uint32_t option; 593 uint64_t buid; 594 int ret; 595 596 if ((nargs != 4) || (nret != 1)) { 597 goto param_error_exit; 598 } 599 600 buid = rtas_ldq(args, 1); 601 option = rtas_ld(args, 3); 602 sphb = spapr_pci_find_phb(spapr, buid); 603 if (!sphb) { 604 goto param_error_exit; 605 } 606 607 if (!spapr_phb_eeh_available(sphb)) { 608 goto param_error_exit; 609 } 610 611 ret = spapr_phb_vfio_eeh_reset(sphb, option); 612 rtas_st(rets, 0, ret); 613 return; 614 615 param_error_exit: 616 rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); 617 } 618 619 static void rtas_ibm_configure_pe(PowerPCCPU *cpu, 620 sPAPRMachineState *spapr, 621 uint32_t token, uint32_t nargs, 622 target_ulong args, uint32_t nret, 623 target_ulong rets) 624 { 625 sPAPRPHBState *sphb; 626 uint64_t buid; 627 int ret; 628 629 if ((nargs != 3) || (nret != 1)) { 630 goto param_error_exit; 631 } 632 633 buid = rtas_ldq(args, 1); 634 sphb = spapr_pci_find_phb(spapr, buid); 635 if (!sphb) { 636 goto param_error_exit; 637 } 638 639 if (!spapr_phb_eeh_available(sphb)) { 640 goto param_error_exit; 641 } 642 643 ret = spapr_phb_vfio_eeh_configure(sphb); 644 rtas_st(rets, 0, ret); 645 return; 646 647 param_error_exit: 648 rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); 649 } 650 651 /* To support it later */ 652 static void rtas_ibm_slot_error_detail(PowerPCCPU *cpu, 653 sPAPRMachineState *spapr, 654 uint32_t token, uint32_t nargs, 655 target_ulong args, uint32_t nret, 656 target_ulong rets) 657 { 658 sPAPRPHBState *sphb; 659 int option; 660 uint64_t buid; 661 662 if ((nargs != 8) || (nret != 1)) { 663 goto param_error_exit; 664 } 665 666 buid = rtas_ldq(args, 1); 667 sphb = spapr_pci_find_phb(spapr, buid); 668 if (!sphb) { 669 goto param_error_exit; 670 } 671 672 if (!spapr_phb_eeh_available(sphb)) { 673 goto param_error_exit; 674 } 675 676 option = rtas_ld(args, 7); 677 switch (option) { 678 case RTAS_SLOT_TEMP_ERR_LOG: 679 case RTAS_SLOT_PERM_ERR_LOG: 680 break; 681 default: 682 goto param_error_exit; 683 } 684 685 /* We don't have error log yet */ 686 rtas_st(rets, 0, RTAS_OUT_NO_ERRORS_FOUND); 687 return; 688 689 param_error_exit: 690 rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); 691 } 692 693 static int pci_spapr_swizzle(int slot, int pin) 694 { 695 return (slot + pin) % PCI_NUM_PINS; 696 } 697 698 static int pci_spapr_map_irq(PCIDevice *pci_dev, int irq_num) 699 { 700 /* 701 * Here we need to convert pci_dev + irq_num to some unique value 702 * which is less than number of IRQs on the specific bus (4). We 703 * use standard PCI swizzling, that is (slot number + pin number) 704 * % 4. 705 */ 706 return pci_spapr_swizzle(PCI_SLOT(pci_dev->devfn), irq_num); 707 } 708 709 static void pci_spapr_set_irq(void *opaque, int irq_num, int level) 710 { 711 /* 712 * Here we use the number returned by pci_spapr_map_irq to find a 713 * corresponding qemu_irq. 714 */ 715 sPAPRPHBState *phb = opaque; 716 717 trace_spapr_pci_lsi_set(phb->dtbusname, irq_num, phb->lsi_table[irq_num].irq); 718 qemu_set_irq(spapr_phb_lsi_qirq(phb, irq_num), level); 719 } 720 721 static PCIINTxRoute spapr_route_intx_pin_to_irq(void *opaque, int pin) 722 { 723 sPAPRPHBState *sphb = SPAPR_PCI_HOST_BRIDGE(opaque); 724 PCIINTxRoute route; 725 726 route.mode = PCI_INTX_ENABLED; 727 route.irq = sphb->lsi_table[pin].irq; 728 729 return route; 730 } 731 732 /* 733 * MSI/MSIX memory region implementation. 734 * The handler handles both MSI and MSIX. 735 * The vector number is encoded in least bits in data. 736 */ 737 static void spapr_msi_write(void *opaque, hwaddr addr, 738 uint64_t data, unsigned size) 739 { 740 sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine()); 741 uint32_t irq = data; 742 743 trace_spapr_pci_msi_write(addr, data, irq); 744 745 qemu_irq_pulse(spapr_qirq(spapr, irq)); 746 } 747 748 static const MemoryRegionOps spapr_msi_ops = { 749 /* There is no .read as the read result is undefined by PCI spec */ 750 .read = NULL, 751 .write = spapr_msi_write, 752 .endianness = DEVICE_LITTLE_ENDIAN 753 }; 754 755 /* 756 * PHB PCI device 757 */ 758 static AddressSpace *spapr_pci_dma_iommu(PCIBus *bus, void *opaque, int devfn) 759 { 760 sPAPRPHBState *phb = opaque; 761 762 return &phb->iommu_as; 763 } 764 765 static char *spapr_phb_vfio_get_loc_code(sPAPRPHBState *sphb, PCIDevice *pdev) 766 { 767 char *path = NULL, *buf = NULL, *host = NULL; 768 769 /* Get the PCI VFIO host id */ 770 host = object_property_get_str(OBJECT(pdev), "host", NULL); 771 if (!host) { 772 goto err_out; 773 } 774 775 /* Construct the path of the file that will give us the DT location */ 776 path = g_strdup_printf("/sys/bus/pci/devices/%s/devspec", host); 777 g_free(host); 778 if (!g_file_get_contents(path, &buf, NULL, NULL)) { 779 goto err_out; 780 } 781 g_free(path); 782 783 /* Construct and read from host device tree the loc-code */ 784 path = g_strdup_printf("/proc/device-tree%s/ibm,loc-code", buf); 785 g_free(buf); 786 if (!g_file_get_contents(path, &buf, NULL, NULL)) { 787 goto err_out; 788 } 789 return buf; 790 791 err_out: 792 g_free(path); 793 return NULL; 794 } 795 796 static char *spapr_phb_get_loc_code(sPAPRPHBState *sphb, PCIDevice *pdev) 797 { 798 char *buf; 799 const char *devtype = "qemu"; 800 uint32_t busnr = pci_bus_num(PCI_BUS(qdev_get_parent_bus(DEVICE(pdev)))); 801 802 if (object_dynamic_cast(OBJECT(pdev), "vfio-pci")) { 803 buf = spapr_phb_vfio_get_loc_code(sphb, pdev); 804 if (buf) { 805 return buf; 806 } 807 devtype = "vfio"; 808 } 809 /* 810 * For emulated devices and VFIO-failure case, make up 811 * the loc-code. 812 */ 813 buf = g_strdup_printf("%s_%s:%04x:%02x:%02x.%x", 814 devtype, pdev->name, sphb->index, busnr, 815 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); 816 return buf; 817 } 818 819 /* Macros to operate with address in OF binding to PCI */ 820 #define b_x(x, p, l) (((x) & ((1<<(l))-1)) << (p)) 821 #define b_n(x) b_x((x), 31, 1) /* 0 if relocatable */ 822 #define b_p(x) b_x((x), 30, 1) /* 1 if prefetchable */ 823 #define b_t(x) b_x((x), 29, 1) /* 1 if the address is aliased */ 824 #define b_ss(x) b_x((x), 24, 2) /* the space code */ 825 #define b_bbbbbbbb(x) b_x((x), 16, 8) /* bus number */ 826 #define b_ddddd(x) b_x((x), 11, 5) /* device number */ 827 #define b_fff(x) b_x((x), 8, 3) /* function number */ 828 #define b_rrrrrrrr(x) b_x((x), 0, 8) /* register number */ 829 830 /* for 'reg'/'assigned-addresses' OF properties */ 831 #define RESOURCE_CELLS_SIZE 2 832 #define RESOURCE_CELLS_ADDRESS 3 833 834 typedef struct ResourceFields { 835 uint32_t phys_hi; 836 uint32_t phys_mid; 837 uint32_t phys_lo; 838 uint32_t size_hi; 839 uint32_t size_lo; 840 } QEMU_PACKED ResourceFields; 841 842 typedef struct ResourceProps { 843 ResourceFields reg[8]; 844 ResourceFields assigned[7]; 845 uint32_t reg_len; 846 uint32_t assigned_len; 847 } ResourceProps; 848 849 /* fill in the 'reg'/'assigned-resources' OF properties for 850 * a PCI device. 'reg' describes resource requirements for a 851 * device's IO/MEM regions, 'assigned-addresses' describes the 852 * actual resource assignments. 853 * 854 * the properties are arrays of ('phys-addr', 'size') pairs describing 855 * the addressable regions of the PCI device, where 'phys-addr' is a 856 * RESOURCE_CELLS_ADDRESS-tuple of 32-bit integers corresponding to 857 * (phys.hi, phys.mid, phys.lo), and 'size' is a 858 * RESOURCE_CELLS_SIZE-tuple corresponding to (size.hi, size.lo). 859 * 860 * phys.hi = 0xYYXXXXZZ, where: 861 * 0xYY = npt000ss 862 * ||| | 863 * ||| +-- space code 864 * ||| | 865 * ||| + 00 if configuration space 866 * ||| + 01 if IO region, 867 * ||| + 10 if 32-bit MEM region 868 * ||| + 11 if 64-bit MEM region 869 * ||| 870 * ||+------ for non-relocatable IO: 1 if aliased 871 * || for relocatable IO: 1 if below 64KB 872 * || for MEM: 1 if below 1MB 873 * |+------- 1 if region is prefetchable 874 * +-------- 1 if region is non-relocatable 875 * 0xXXXX = bbbbbbbb dddddfff, encoding bus, slot, and function 876 * bits respectively 877 * 0xZZ = rrrrrrrr, the register number of the BAR corresponding 878 * to the region 879 * 880 * phys.mid and phys.lo correspond respectively to the hi/lo portions 881 * of the actual address of the region. 882 * 883 * how the phys-addr/size values are used differ slightly between 884 * 'reg' and 'assigned-addresses' properties. namely, 'reg' has 885 * an additional description for the config space region of the 886 * device, and in the case of QEMU has n=0 and phys.mid=phys.lo=0 887 * to describe the region as relocatable, with an address-mapping 888 * that corresponds directly to the PHB's address space for the 889 * resource. 'assigned-addresses' always has n=1 set with an absolute 890 * address assigned for the resource. in general, 'assigned-addresses' 891 * won't be populated, since addresses for PCI devices are generally 892 * unmapped initially and left to the guest to assign. 893 * 894 * note also that addresses defined in these properties are, at least 895 * for PAPR guests, relative to the PHBs IO/MEM windows, and 896 * correspond directly to the addresses in the BARs. 897 * 898 * in accordance with PCI Bus Binding to Open Firmware, 899 * IEEE Std 1275-1994, section 4.1.1, as implemented by PAPR+ v2.7, 900 * Appendix C. 901 */ 902 static void populate_resource_props(PCIDevice *d, ResourceProps *rp) 903 { 904 int bus_num = pci_bus_num(PCI_BUS(qdev_get_parent_bus(DEVICE(d)))); 905 uint32_t dev_id = (b_bbbbbbbb(bus_num) | 906 b_ddddd(PCI_SLOT(d->devfn)) | 907 b_fff(PCI_FUNC(d->devfn))); 908 ResourceFields *reg, *assigned; 909 int i, reg_idx = 0, assigned_idx = 0; 910 911 /* config space region */ 912 reg = &rp->reg[reg_idx++]; 913 reg->phys_hi = cpu_to_be32(dev_id); 914 reg->phys_mid = 0; 915 reg->phys_lo = 0; 916 reg->size_hi = 0; 917 reg->size_lo = 0; 918 919 for (i = 0; i < PCI_NUM_REGIONS; i++) { 920 if (!d->io_regions[i].size) { 921 continue; 922 } 923 924 reg = &rp->reg[reg_idx++]; 925 926 reg->phys_hi = cpu_to_be32(dev_id | b_rrrrrrrr(pci_bar(d, i))); 927 if (d->io_regions[i].type & PCI_BASE_ADDRESS_SPACE_IO) { 928 reg->phys_hi |= cpu_to_be32(b_ss(1)); 929 } else if (d->io_regions[i].type & PCI_BASE_ADDRESS_MEM_TYPE_64) { 930 reg->phys_hi |= cpu_to_be32(b_ss(3)); 931 } else { 932 reg->phys_hi |= cpu_to_be32(b_ss(2)); 933 } 934 reg->phys_mid = 0; 935 reg->phys_lo = 0; 936 reg->size_hi = cpu_to_be32(d->io_regions[i].size >> 32); 937 reg->size_lo = cpu_to_be32(d->io_regions[i].size); 938 939 if (d->io_regions[i].addr == PCI_BAR_UNMAPPED) { 940 continue; 941 } 942 943 assigned = &rp->assigned[assigned_idx++]; 944 assigned->phys_hi = cpu_to_be32(reg->phys_hi | b_n(1)); 945 assigned->phys_mid = cpu_to_be32(d->io_regions[i].addr >> 32); 946 assigned->phys_lo = cpu_to_be32(d->io_regions[i].addr); 947 assigned->size_hi = reg->size_hi; 948 assigned->size_lo = reg->size_lo; 949 } 950 951 rp->reg_len = reg_idx * sizeof(ResourceFields); 952 rp->assigned_len = assigned_idx * sizeof(ResourceFields); 953 } 954 955 typedef struct PCIClass PCIClass; 956 typedef struct PCISubClass PCISubClass; 957 typedef struct PCIIFace PCIIFace; 958 959 struct PCIIFace { 960 int iface; 961 const char *name; 962 }; 963 964 struct PCISubClass { 965 int subclass; 966 const char *name; 967 const PCIIFace *iface; 968 }; 969 970 struct PCIClass { 971 const char *name; 972 const PCISubClass *subc; 973 }; 974 975 static const PCISubClass undef_subclass[] = { 976 { PCI_CLASS_NOT_DEFINED_VGA, "display", NULL }, 977 { 0xFF, NULL, NULL }, 978 }; 979 980 static const PCISubClass mass_subclass[] = { 981 { PCI_CLASS_STORAGE_SCSI, "scsi", NULL }, 982 { PCI_CLASS_STORAGE_IDE, "ide", NULL }, 983 { PCI_CLASS_STORAGE_FLOPPY, "fdc", NULL }, 984 { PCI_CLASS_STORAGE_IPI, "ipi", NULL }, 985 { PCI_CLASS_STORAGE_RAID, "raid", NULL }, 986 { PCI_CLASS_STORAGE_ATA, "ata", NULL }, 987 { PCI_CLASS_STORAGE_SATA, "sata", NULL }, 988 { PCI_CLASS_STORAGE_SAS, "sas", NULL }, 989 { 0xFF, NULL, NULL }, 990 }; 991 992 static const PCISubClass net_subclass[] = { 993 { PCI_CLASS_NETWORK_ETHERNET, "ethernet", NULL }, 994 { PCI_CLASS_NETWORK_TOKEN_RING, "token-ring", NULL }, 995 { PCI_CLASS_NETWORK_FDDI, "fddi", NULL }, 996 { PCI_CLASS_NETWORK_ATM, "atm", NULL }, 997 { PCI_CLASS_NETWORK_ISDN, "isdn", NULL }, 998 { PCI_CLASS_NETWORK_WORLDFIP, "worldfip", NULL }, 999 { PCI_CLASS_NETWORK_PICMG214, "picmg", NULL }, 1000 { 0xFF, NULL, NULL }, 1001 }; 1002 1003 static const PCISubClass displ_subclass[] = { 1004 { PCI_CLASS_DISPLAY_VGA, "vga", NULL }, 1005 { PCI_CLASS_DISPLAY_XGA, "xga", NULL }, 1006 { PCI_CLASS_DISPLAY_3D, "3d-controller", NULL }, 1007 { 0xFF, NULL, NULL }, 1008 }; 1009 1010 static const PCISubClass media_subclass[] = { 1011 { PCI_CLASS_MULTIMEDIA_VIDEO, "video", NULL }, 1012 { PCI_CLASS_MULTIMEDIA_AUDIO, "sound", NULL }, 1013 { PCI_CLASS_MULTIMEDIA_PHONE, "telephony", NULL }, 1014 { 0xFF, NULL, NULL }, 1015 }; 1016 1017 static const PCISubClass mem_subclass[] = { 1018 { PCI_CLASS_MEMORY_RAM, "memory", NULL }, 1019 { PCI_CLASS_MEMORY_FLASH, "flash", NULL }, 1020 { 0xFF, NULL, NULL }, 1021 }; 1022 1023 static const PCISubClass bridg_subclass[] = { 1024 { PCI_CLASS_BRIDGE_HOST, "host", NULL }, 1025 { PCI_CLASS_BRIDGE_ISA, "isa", NULL }, 1026 { PCI_CLASS_BRIDGE_EISA, "eisa", NULL }, 1027 { PCI_CLASS_BRIDGE_MC, "mca", NULL }, 1028 { PCI_CLASS_BRIDGE_PCI, "pci", NULL }, 1029 { PCI_CLASS_BRIDGE_PCMCIA, "pcmcia", NULL }, 1030 { PCI_CLASS_BRIDGE_NUBUS, "nubus", NULL }, 1031 { PCI_CLASS_BRIDGE_CARDBUS, "cardbus", NULL }, 1032 { PCI_CLASS_BRIDGE_RACEWAY, "raceway", NULL }, 1033 { PCI_CLASS_BRIDGE_PCI_SEMITP, "semi-transparent-pci", NULL }, 1034 { PCI_CLASS_BRIDGE_IB_PCI, "infiniband", NULL }, 1035 { 0xFF, NULL, NULL }, 1036 }; 1037 1038 static const PCISubClass comm_subclass[] = { 1039 { PCI_CLASS_COMMUNICATION_SERIAL, "serial", NULL }, 1040 { PCI_CLASS_COMMUNICATION_PARALLEL, "parallel", NULL }, 1041 { PCI_CLASS_COMMUNICATION_MULTISERIAL, "multiport-serial", NULL }, 1042 { PCI_CLASS_COMMUNICATION_MODEM, "modem", NULL }, 1043 { PCI_CLASS_COMMUNICATION_GPIB, "gpib", NULL }, 1044 { PCI_CLASS_COMMUNICATION_SC, "smart-card", NULL }, 1045 { 0xFF, NULL, NULL, }, 1046 }; 1047 1048 static const PCIIFace pic_iface[] = { 1049 { PCI_CLASS_SYSTEM_PIC_IOAPIC, "io-apic" }, 1050 { PCI_CLASS_SYSTEM_PIC_IOXAPIC, "io-xapic" }, 1051 { 0xFF, NULL }, 1052 }; 1053 1054 static const PCISubClass sys_subclass[] = { 1055 { PCI_CLASS_SYSTEM_PIC, "interrupt-controller", pic_iface }, 1056 { PCI_CLASS_SYSTEM_DMA, "dma-controller", NULL }, 1057 { PCI_CLASS_SYSTEM_TIMER, "timer", NULL }, 1058 { PCI_CLASS_SYSTEM_RTC, "rtc", NULL }, 1059 { PCI_CLASS_SYSTEM_PCI_HOTPLUG, "hot-plug-controller", NULL }, 1060 { PCI_CLASS_SYSTEM_SDHCI, "sd-host-controller", NULL }, 1061 { 0xFF, NULL, NULL }, 1062 }; 1063 1064 static const PCISubClass inp_subclass[] = { 1065 { PCI_CLASS_INPUT_KEYBOARD, "keyboard", NULL }, 1066 { PCI_CLASS_INPUT_PEN, "pen", NULL }, 1067 { PCI_CLASS_INPUT_MOUSE, "mouse", NULL }, 1068 { PCI_CLASS_INPUT_SCANNER, "scanner", NULL }, 1069 { PCI_CLASS_INPUT_GAMEPORT, "gameport", NULL }, 1070 { 0xFF, NULL, NULL }, 1071 }; 1072 1073 static const PCISubClass dock_subclass[] = { 1074 { PCI_CLASS_DOCKING_GENERIC, "dock", NULL }, 1075 { 0xFF, NULL, NULL }, 1076 }; 1077 1078 static const PCISubClass cpu_subclass[] = { 1079 { PCI_CLASS_PROCESSOR_PENTIUM, "pentium", NULL }, 1080 { PCI_CLASS_PROCESSOR_POWERPC, "powerpc", NULL }, 1081 { PCI_CLASS_PROCESSOR_MIPS, "mips", NULL }, 1082 { PCI_CLASS_PROCESSOR_CO, "co-processor", NULL }, 1083 { 0xFF, NULL, NULL }, 1084 }; 1085 1086 static const PCIIFace usb_iface[] = { 1087 { PCI_CLASS_SERIAL_USB_UHCI, "usb-uhci" }, 1088 { PCI_CLASS_SERIAL_USB_OHCI, "usb-ohci", }, 1089 { PCI_CLASS_SERIAL_USB_EHCI, "usb-ehci" }, 1090 { PCI_CLASS_SERIAL_USB_XHCI, "usb-xhci" }, 1091 { PCI_CLASS_SERIAL_USB_UNKNOWN, "usb-unknown" }, 1092 { PCI_CLASS_SERIAL_USB_DEVICE, "usb-device" }, 1093 { 0xFF, NULL }, 1094 }; 1095 1096 static const PCISubClass ser_subclass[] = { 1097 { PCI_CLASS_SERIAL_FIREWIRE, "firewire", NULL }, 1098 { PCI_CLASS_SERIAL_ACCESS, "access-bus", NULL }, 1099 { PCI_CLASS_SERIAL_SSA, "ssa", NULL }, 1100 { PCI_CLASS_SERIAL_USB, "usb", usb_iface }, 1101 { PCI_CLASS_SERIAL_FIBER, "fibre-channel", NULL }, 1102 { PCI_CLASS_SERIAL_SMBUS, "smb", NULL }, 1103 { PCI_CLASS_SERIAL_IB, "infiniband", NULL }, 1104 { PCI_CLASS_SERIAL_IPMI, "ipmi", NULL }, 1105 { PCI_CLASS_SERIAL_SERCOS, "sercos", NULL }, 1106 { PCI_CLASS_SERIAL_CANBUS, "canbus", NULL }, 1107 { 0xFF, NULL, NULL }, 1108 }; 1109 1110 static const PCISubClass wrl_subclass[] = { 1111 { PCI_CLASS_WIRELESS_IRDA, "irda", NULL }, 1112 { PCI_CLASS_WIRELESS_CIR, "consumer-ir", NULL }, 1113 { PCI_CLASS_WIRELESS_RF_CONTROLLER, "rf-controller", NULL }, 1114 { PCI_CLASS_WIRELESS_BLUETOOTH, "bluetooth", NULL }, 1115 { PCI_CLASS_WIRELESS_BROADBAND, "broadband", NULL }, 1116 { 0xFF, NULL, NULL }, 1117 }; 1118 1119 static const PCISubClass sat_subclass[] = { 1120 { PCI_CLASS_SATELLITE_TV, "satellite-tv", NULL }, 1121 { PCI_CLASS_SATELLITE_AUDIO, "satellite-audio", NULL }, 1122 { PCI_CLASS_SATELLITE_VOICE, "satellite-voice", NULL }, 1123 { PCI_CLASS_SATELLITE_DATA, "satellite-data", NULL }, 1124 { 0xFF, NULL, NULL }, 1125 }; 1126 1127 static const PCISubClass crypt_subclass[] = { 1128 { PCI_CLASS_CRYPT_NETWORK, "network-encryption", NULL }, 1129 { PCI_CLASS_CRYPT_ENTERTAINMENT, 1130 "entertainment-encryption", NULL }, 1131 { 0xFF, NULL, NULL }, 1132 }; 1133 1134 static const PCISubClass spc_subclass[] = { 1135 { PCI_CLASS_SP_DPIO, "dpio", NULL }, 1136 { PCI_CLASS_SP_PERF, "counter", NULL }, 1137 { PCI_CLASS_SP_SYNCH, "measurement", NULL }, 1138 { PCI_CLASS_SP_MANAGEMENT, "management-card", NULL }, 1139 { 0xFF, NULL, NULL }, 1140 }; 1141 1142 static const PCIClass pci_classes[] = { 1143 { "legacy-device", undef_subclass }, 1144 { "mass-storage", mass_subclass }, 1145 { "network", net_subclass }, 1146 { "display", displ_subclass, }, 1147 { "multimedia-device", media_subclass }, 1148 { "memory-controller", mem_subclass }, 1149 { "unknown-bridge", bridg_subclass }, 1150 { "communication-controller", comm_subclass}, 1151 { "system-peripheral", sys_subclass }, 1152 { "input-controller", inp_subclass }, 1153 { "docking-station", dock_subclass }, 1154 { "cpu", cpu_subclass }, 1155 { "serial-bus", ser_subclass }, 1156 { "wireless-controller", wrl_subclass }, 1157 { "intelligent-io", NULL }, 1158 { "satellite-device", sat_subclass }, 1159 { "encryption", crypt_subclass }, 1160 { "data-processing-controller", spc_subclass }, 1161 }; 1162 1163 static const char *pci_find_device_name(uint8_t class, uint8_t subclass, 1164 uint8_t iface) 1165 { 1166 const PCIClass *pclass; 1167 const PCISubClass *psubclass; 1168 const PCIIFace *piface; 1169 const char *name; 1170 1171 if (class >= ARRAY_SIZE(pci_classes)) { 1172 return "pci"; 1173 } 1174 1175 pclass = pci_classes + class; 1176 name = pclass->name; 1177 1178 if (pclass->subc == NULL) { 1179 return name; 1180 } 1181 1182 psubclass = pclass->subc; 1183 while ((psubclass->subclass & 0xff) != 0xff) { 1184 if ((psubclass->subclass & 0xff) == subclass) { 1185 name = psubclass->name; 1186 break; 1187 } 1188 psubclass++; 1189 } 1190 1191 piface = psubclass->iface; 1192 if (piface == NULL) { 1193 return name; 1194 } 1195 while ((piface->iface & 0xff) != 0xff) { 1196 if ((piface->iface & 0xff) == iface) { 1197 name = piface->name; 1198 break; 1199 } 1200 piface++; 1201 } 1202 1203 return name; 1204 } 1205 1206 static gchar *pci_get_node_name(PCIDevice *dev) 1207 { 1208 int slot = PCI_SLOT(dev->devfn); 1209 int func = PCI_FUNC(dev->devfn); 1210 uint32_t ccode = pci_default_read_config(dev, PCI_CLASS_PROG, 3); 1211 const char *name; 1212 1213 name = pci_find_device_name((ccode >> 16) & 0xff, (ccode >> 8) & 0xff, 1214 ccode & 0xff); 1215 1216 if (func != 0) { 1217 return g_strdup_printf("%s@%x,%x", name, slot, func); 1218 } else { 1219 return g_strdup_printf("%s@%x", name, slot); 1220 } 1221 } 1222 1223 static uint32_t spapr_phb_get_pci_drc_index(sPAPRPHBState *phb, 1224 PCIDevice *pdev); 1225 1226 static void spapr_populate_pci_child_dt(PCIDevice *dev, void *fdt, int offset, 1227 sPAPRPHBState *sphb) 1228 { 1229 ResourceProps rp; 1230 bool is_bridge = false; 1231 int pci_status; 1232 char *buf = NULL; 1233 uint32_t drc_index = spapr_phb_get_pci_drc_index(sphb, dev); 1234 uint32_t ccode = pci_default_read_config(dev, PCI_CLASS_PROG, 3); 1235 uint32_t max_msi, max_msix; 1236 1237 if (pci_default_read_config(dev, PCI_HEADER_TYPE, 1) == 1238 PCI_HEADER_TYPE_BRIDGE) { 1239 is_bridge = true; 1240 } 1241 1242 /* in accordance with PAPR+ v2.7 13.6.3, Table 181 */ 1243 _FDT(fdt_setprop_cell(fdt, offset, "vendor-id", 1244 pci_default_read_config(dev, PCI_VENDOR_ID, 2))); 1245 _FDT(fdt_setprop_cell(fdt, offset, "device-id", 1246 pci_default_read_config(dev, PCI_DEVICE_ID, 2))); 1247 _FDT(fdt_setprop_cell(fdt, offset, "revision-id", 1248 pci_default_read_config(dev, PCI_REVISION_ID, 1))); 1249 _FDT(fdt_setprop_cell(fdt, offset, "class-code", ccode)); 1250 if (pci_default_read_config(dev, PCI_INTERRUPT_PIN, 1)) { 1251 _FDT(fdt_setprop_cell(fdt, offset, "interrupts", 1252 pci_default_read_config(dev, PCI_INTERRUPT_PIN, 1))); 1253 } 1254 1255 if (!is_bridge) { 1256 _FDT(fdt_setprop_cell(fdt, offset, "min-grant", 1257 pci_default_read_config(dev, PCI_MIN_GNT, 1))); 1258 _FDT(fdt_setprop_cell(fdt, offset, "max-latency", 1259 pci_default_read_config(dev, PCI_MAX_LAT, 1))); 1260 } 1261 1262 if (pci_default_read_config(dev, PCI_SUBSYSTEM_ID, 2)) { 1263 _FDT(fdt_setprop_cell(fdt, offset, "subsystem-id", 1264 pci_default_read_config(dev, PCI_SUBSYSTEM_ID, 2))); 1265 } 1266 1267 if (pci_default_read_config(dev, PCI_SUBSYSTEM_VENDOR_ID, 2)) { 1268 _FDT(fdt_setprop_cell(fdt, offset, "subsystem-vendor-id", 1269 pci_default_read_config(dev, PCI_SUBSYSTEM_VENDOR_ID, 2))); 1270 } 1271 1272 _FDT(fdt_setprop_cell(fdt, offset, "cache-line-size", 1273 pci_default_read_config(dev, PCI_CACHE_LINE_SIZE, 1))); 1274 1275 /* the following fdt cells are masked off the pci status register */ 1276 pci_status = pci_default_read_config(dev, PCI_STATUS, 2); 1277 _FDT(fdt_setprop_cell(fdt, offset, "devsel-speed", 1278 PCI_STATUS_DEVSEL_MASK & pci_status)); 1279 1280 if (pci_status & PCI_STATUS_FAST_BACK) { 1281 _FDT(fdt_setprop(fdt, offset, "fast-back-to-back", NULL, 0)); 1282 } 1283 if (pci_status & PCI_STATUS_66MHZ) { 1284 _FDT(fdt_setprop(fdt, offset, "66mhz-capable", NULL, 0)); 1285 } 1286 if (pci_status & PCI_STATUS_UDF) { 1287 _FDT(fdt_setprop(fdt, offset, "udf-supported", NULL, 0)); 1288 } 1289 1290 _FDT(fdt_setprop_string(fdt, offset, "name", 1291 pci_find_device_name((ccode >> 16) & 0xff, 1292 (ccode >> 8) & 0xff, 1293 ccode & 0xff))); 1294 1295 buf = spapr_phb_get_loc_code(sphb, dev); 1296 _FDT(fdt_setprop_string(fdt, offset, "ibm,loc-code", buf)); 1297 g_free(buf); 1298 1299 if (drc_index) { 1300 _FDT(fdt_setprop_cell(fdt, offset, "ibm,my-drc-index", drc_index)); 1301 } 1302 1303 _FDT(fdt_setprop_cell(fdt, offset, "#address-cells", 1304 RESOURCE_CELLS_ADDRESS)); 1305 _FDT(fdt_setprop_cell(fdt, offset, "#size-cells", 1306 RESOURCE_CELLS_SIZE)); 1307 1308 if (msi_present(dev)) { 1309 max_msi = msi_nr_vectors_allocated(dev); 1310 if (max_msi) { 1311 _FDT(fdt_setprop_cell(fdt, offset, "ibm,req#msi", max_msi)); 1312 } 1313 } 1314 if (msix_present(dev)) { 1315 max_msix = dev->msix_entries_nr; 1316 if (max_msix) { 1317 _FDT(fdt_setprop_cell(fdt, offset, "ibm,req#msi-x", max_msix)); 1318 } 1319 } 1320 1321 populate_resource_props(dev, &rp); 1322 _FDT(fdt_setprop(fdt, offset, "reg", (uint8_t *)rp.reg, rp.reg_len)); 1323 _FDT(fdt_setprop(fdt, offset, "assigned-addresses", 1324 (uint8_t *)rp.assigned, rp.assigned_len)); 1325 1326 if (sphb->pcie_ecs && pci_is_express(dev)) { 1327 _FDT(fdt_setprop_cell(fdt, offset, "ibm,pci-config-space-type", 0x1)); 1328 } 1329 } 1330 1331 /* create OF node for pci device and required OF DT properties */ 1332 static int spapr_create_pci_child_dt(sPAPRPHBState *phb, PCIDevice *dev, 1333 void *fdt, int node_offset) 1334 { 1335 int offset; 1336 gchar *nodename; 1337 1338 nodename = pci_get_node_name(dev); 1339 _FDT(offset = fdt_add_subnode(fdt, node_offset, nodename)); 1340 g_free(nodename); 1341 1342 spapr_populate_pci_child_dt(dev, fdt, offset, phb); 1343 1344 return offset; 1345 } 1346 1347 /* Callback to be called during DRC release. */ 1348 void spapr_phb_remove_pci_device_cb(DeviceState *dev) 1349 { 1350 /* some version guests do not wait for completion of a device 1351 * cleanup (generally done asynchronously by the kernel) before 1352 * signaling to QEMU that the device is safe, but instead sleep 1353 * for some 'safe' period of time. unfortunately on a busy host 1354 * this sleep isn't guaranteed to be long enough, resulting in 1355 * bad things like IRQ lines being left asserted during final 1356 * device removal. to deal with this we call reset just prior 1357 * to finalizing the device, which will put the device back into 1358 * an 'idle' state, as the device cleanup code expects. 1359 */ 1360 pci_device_reset(PCI_DEVICE(dev)); 1361 object_unparent(OBJECT(dev)); 1362 } 1363 1364 static sPAPRDRConnector *spapr_phb_get_pci_func_drc(sPAPRPHBState *phb, 1365 uint32_t busnr, 1366 int32_t devfn) 1367 { 1368 return spapr_drc_by_id(TYPE_SPAPR_DRC_PCI, 1369 (phb->index << 16) | (busnr << 8) | devfn); 1370 } 1371 1372 static sPAPRDRConnector *spapr_phb_get_pci_drc(sPAPRPHBState *phb, 1373 PCIDevice *pdev) 1374 { 1375 uint32_t busnr = pci_bus_num(PCI_BUS(qdev_get_parent_bus(DEVICE(pdev)))); 1376 return spapr_phb_get_pci_func_drc(phb, busnr, pdev->devfn); 1377 } 1378 1379 static uint32_t spapr_phb_get_pci_drc_index(sPAPRPHBState *phb, 1380 PCIDevice *pdev) 1381 { 1382 sPAPRDRConnector *drc = spapr_phb_get_pci_drc(phb, pdev); 1383 1384 if (!drc) { 1385 return 0; 1386 } 1387 1388 return spapr_drc_index(drc); 1389 } 1390 1391 static void spapr_pci_plug(HotplugHandler *plug_handler, 1392 DeviceState *plugged_dev, Error **errp) 1393 { 1394 sPAPRPHBState *phb = SPAPR_PCI_HOST_BRIDGE(DEVICE(plug_handler)); 1395 PCIDevice *pdev = PCI_DEVICE(plugged_dev); 1396 sPAPRDRConnector *drc = spapr_phb_get_pci_drc(phb, pdev); 1397 Error *local_err = NULL; 1398 PCIBus *bus = PCI_BUS(qdev_get_parent_bus(DEVICE(pdev))); 1399 uint32_t slotnr = PCI_SLOT(pdev->devfn); 1400 void *fdt = NULL; 1401 int fdt_start_offset, fdt_size; 1402 1403 /* if DR is disabled we don't need to do anything in the case of 1404 * hotplug or coldplug callbacks 1405 */ 1406 if (!phb->dr_enabled) { 1407 /* if this is a hotplug operation initiated by the user 1408 * we need to let them know it's not enabled 1409 */ 1410 if (plugged_dev->hotplugged) { 1411 error_setg(&local_err, QERR_BUS_NO_HOTPLUG, 1412 object_get_typename(OBJECT(phb))); 1413 } 1414 goto out; 1415 } 1416 1417 g_assert(drc); 1418 1419 /* Following the QEMU convention used for PCIe multifunction 1420 * hotplug, we do not allow functions to be hotplugged to a 1421 * slot that already has function 0 present 1422 */ 1423 if (plugged_dev->hotplugged && bus->devices[PCI_DEVFN(slotnr, 0)] && 1424 PCI_FUNC(pdev->devfn) != 0) { 1425 error_setg(&local_err, "PCI: slot %d function 0 already ocuppied by %s," 1426 " additional functions can no longer be exposed to guest.", 1427 slotnr, bus->devices[PCI_DEVFN(slotnr, 0)]->name); 1428 goto out; 1429 } 1430 1431 fdt = create_device_tree(&fdt_size); 1432 fdt_start_offset = spapr_create_pci_child_dt(phb, pdev, fdt, 0); 1433 1434 spapr_drc_attach(drc, DEVICE(pdev), fdt, fdt_start_offset, &local_err); 1435 if (local_err) { 1436 goto out; 1437 } 1438 1439 /* If this is function 0, signal hotplug for all the device functions. 1440 * Otherwise defer sending the hotplug event. 1441 */ 1442 if (!spapr_drc_hotplugged(plugged_dev)) { 1443 spapr_drc_reset(drc); 1444 } else if (PCI_FUNC(pdev->devfn) == 0) { 1445 int i; 1446 1447 for (i = 0; i < 8; i++) { 1448 sPAPRDRConnector *func_drc; 1449 sPAPRDRConnectorClass *func_drck; 1450 sPAPRDREntitySense state; 1451 1452 func_drc = spapr_phb_get_pci_func_drc(phb, pci_bus_num(bus), 1453 PCI_DEVFN(slotnr, i)); 1454 func_drck = SPAPR_DR_CONNECTOR_GET_CLASS(func_drc); 1455 state = func_drck->dr_entity_sense(func_drc); 1456 1457 if (state == SPAPR_DR_ENTITY_SENSE_PRESENT) { 1458 spapr_hotplug_req_add_by_index(func_drc); 1459 } 1460 } 1461 } 1462 1463 out: 1464 if (local_err) { 1465 error_propagate(errp, local_err); 1466 g_free(fdt); 1467 } 1468 } 1469 1470 static void spapr_pci_unplug_request(HotplugHandler *plug_handler, 1471 DeviceState *plugged_dev, Error **errp) 1472 { 1473 sPAPRPHBState *phb = SPAPR_PCI_HOST_BRIDGE(DEVICE(plug_handler)); 1474 PCIDevice *pdev = PCI_DEVICE(plugged_dev); 1475 sPAPRDRConnector *drc = spapr_phb_get_pci_drc(phb, pdev); 1476 1477 if (!phb->dr_enabled) { 1478 error_setg(errp, QERR_BUS_NO_HOTPLUG, 1479 object_get_typename(OBJECT(phb))); 1480 return; 1481 } 1482 1483 g_assert(drc); 1484 g_assert(drc->dev == plugged_dev); 1485 1486 if (!spapr_drc_unplug_requested(drc)) { 1487 PCIBus *bus = PCI_BUS(qdev_get_parent_bus(DEVICE(pdev))); 1488 uint32_t slotnr = PCI_SLOT(pdev->devfn); 1489 sPAPRDRConnector *func_drc; 1490 sPAPRDRConnectorClass *func_drck; 1491 sPAPRDREntitySense state; 1492 int i; 1493 1494 /* ensure any other present functions are pending unplug */ 1495 if (PCI_FUNC(pdev->devfn) == 0) { 1496 for (i = 1; i < 8; i++) { 1497 func_drc = spapr_phb_get_pci_func_drc(phb, pci_bus_num(bus), 1498 PCI_DEVFN(slotnr, i)); 1499 func_drck = SPAPR_DR_CONNECTOR_GET_CLASS(func_drc); 1500 state = func_drck->dr_entity_sense(func_drc); 1501 if (state == SPAPR_DR_ENTITY_SENSE_PRESENT 1502 && !spapr_drc_unplug_requested(func_drc)) { 1503 error_setg(errp, 1504 "PCI: slot %d, function %d still present. " 1505 "Must unplug all non-0 functions first.", 1506 slotnr, i); 1507 return; 1508 } 1509 } 1510 } 1511 1512 spapr_drc_detach(drc); 1513 1514 /* if this isn't func 0, defer unplug event. otherwise signal removal 1515 * for all present functions 1516 */ 1517 if (PCI_FUNC(pdev->devfn) == 0) { 1518 for (i = 7; i >= 0; i--) { 1519 func_drc = spapr_phb_get_pci_func_drc(phb, pci_bus_num(bus), 1520 PCI_DEVFN(slotnr, i)); 1521 func_drck = SPAPR_DR_CONNECTOR_GET_CLASS(func_drc); 1522 state = func_drck->dr_entity_sense(func_drc); 1523 if (state == SPAPR_DR_ENTITY_SENSE_PRESENT) { 1524 spapr_hotplug_req_remove_by_index(func_drc); 1525 } 1526 } 1527 } 1528 } 1529 } 1530 1531 static void spapr_phb_realize(DeviceState *dev, Error **errp) 1532 { 1533 /* We don't use SPAPR_MACHINE() in order to exit gracefully if the user 1534 * tries to add a sPAPR PHB to a non-pseries machine. 1535 */ 1536 sPAPRMachineState *spapr = 1537 (sPAPRMachineState *) object_dynamic_cast(qdev_get_machine(), 1538 TYPE_SPAPR_MACHINE); 1539 SysBusDevice *s = SYS_BUS_DEVICE(dev); 1540 sPAPRPHBState *sphb = SPAPR_PCI_HOST_BRIDGE(s); 1541 PCIHostState *phb = PCI_HOST_BRIDGE(s); 1542 char *namebuf; 1543 int i; 1544 PCIBus *bus; 1545 uint64_t msi_window_size = 4096; 1546 sPAPRTCETable *tcet; 1547 const unsigned windows_supported = 1548 sphb->ddw_enabled ? SPAPR_PCI_DMA_MAX_WINDOWS : 1; 1549 1550 if (!spapr) { 1551 error_setg(errp, TYPE_SPAPR_PCI_HOST_BRIDGE " needs a pseries machine"); 1552 return; 1553 } 1554 1555 if (sphb->index != (uint32_t)-1) { 1556 sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); 1557 Error *local_err = NULL; 1558 1559 smc->phb_placement(spapr, sphb->index, 1560 &sphb->buid, &sphb->io_win_addr, 1561 &sphb->mem_win_addr, &sphb->mem64_win_addr, 1562 windows_supported, sphb->dma_liobn, &local_err); 1563 if (local_err) { 1564 error_propagate(errp, local_err); 1565 return; 1566 } 1567 } else { 1568 error_setg(errp, "\"index\" for PAPR PHB is mandatory"); 1569 return; 1570 } 1571 1572 if (sphb->mem64_win_size != 0) { 1573 if (sphb->mem_win_size > SPAPR_PCI_MEM32_WIN_SIZE) { 1574 error_setg(errp, "32-bit memory window of size 0x%"HWADDR_PRIx 1575 " (max 2 GiB)", sphb->mem_win_size); 1576 return; 1577 } 1578 1579 /* 64-bit window defaults to identity mapping */ 1580 sphb->mem64_win_pciaddr = sphb->mem64_win_addr; 1581 } else if (sphb->mem_win_size > SPAPR_PCI_MEM32_WIN_SIZE) { 1582 /* 1583 * For compatibility with old configuration, if no 64-bit MMIO 1584 * window is specified, but the ordinary (32-bit) memory 1585 * window is specified as > 2GiB, we treat it as a 2GiB 32-bit 1586 * window, with a 64-bit MMIO window following on immediately 1587 * afterwards 1588 */ 1589 sphb->mem64_win_size = sphb->mem_win_size - SPAPR_PCI_MEM32_WIN_SIZE; 1590 sphb->mem64_win_addr = sphb->mem_win_addr + SPAPR_PCI_MEM32_WIN_SIZE; 1591 sphb->mem64_win_pciaddr = 1592 SPAPR_PCI_MEM_WIN_BUS_OFFSET + SPAPR_PCI_MEM32_WIN_SIZE; 1593 sphb->mem_win_size = SPAPR_PCI_MEM32_WIN_SIZE; 1594 } 1595 1596 if (spapr_pci_find_phb(spapr, sphb->buid)) { 1597 error_setg(errp, "PCI host bridges must have unique BUIDs"); 1598 return; 1599 } 1600 1601 if (sphb->numa_node != -1 && 1602 (sphb->numa_node >= MAX_NODES || !numa_info[sphb->numa_node].present)) { 1603 error_setg(errp, "Invalid NUMA node ID for PCI host bridge"); 1604 return; 1605 } 1606 1607 sphb->dtbusname = g_strdup_printf("pci@%" PRIx64, sphb->buid); 1608 1609 /* Initialize memory regions */ 1610 namebuf = g_strdup_printf("%s.mmio", sphb->dtbusname); 1611 memory_region_init(&sphb->memspace, OBJECT(sphb), namebuf, UINT64_MAX); 1612 g_free(namebuf); 1613 1614 namebuf = g_strdup_printf("%s.mmio32-alias", sphb->dtbusname); 1615 memory_region_init_alias(&sphb->mem32window, OBJECT(sphb), 1616 namebuf, &sphb->memspace, 1617 SPAPR_PCI_MEM_WIN_BUS_OFFSET, sphb->mem_win_size); 1618 g_free(namebuf); 1619 memory_region_add_subregion(get_system_memory(), sphb->mem_win_addr, 1620 &sphb->mem32window); 1621 1622 if (sphb->mem64_win_size != 0) { 1623 namebuf = g_strdup_printf("%s.mmio64-alias", sphb->dtbusname); 1624 memory_region_init_alias(&sphb->mem64window, OBJECT(sphb), 1625 namebuf, &sphb->memspace, 1626 sphb->mem64_win_pciaddr, sphb->mem64_win_size); 1627 g_free(namebuf); 1628 1629 memory_region_add_subregion(get_system_memory(), 1630 sphb->mem64_win_addr, 1631 &sphb->mem64window); 1632 } 1633 1634 /* Initialize IO regions */ 1635 namebuf = g_strdup_printf("%s.io", sphb->dtbusname); 1636 memory_region_init(&sphb->iospace, OBJECT(sphb), 1637 namebuf, SPAPR_PCI_IO_WIN_SIZE); 1638 g_free(namebuf); 1639 1640 namebuf = g_strdup_printf("%s.io-alias", sphb->dtbusname); 1641 memory_region_init_alias(&sphb->iowindow, OBJECT(sphb), namebuf, 1642 &sphb->iospace, 0, SPAPR_PCI_IO_WIN_SIZE); 1643 g_free(namebuf); 1644 memory_region_add_subregion(get_system_memory(), sphb->io_win_addr, 1645 &sphb->iowindow); 1646 1647 bus = pci_register_root_bus(dev, NULL, 1648 pci_spapr_set_irq, pci_spapr_map_irq, sphb, 1649 &sphb->memspace, &sphb->iospace, 1650 PCI_DEVFN(0, 0), PCI_NUM_PINS, TYPE_PCI_BUS); 1651 phb->bus = bus; 1652 qbus_set_hotplug_handler(BUS(phb->bus), DEVICE(sphb), NULL); 1653 1654 /* 1655 * Initialize PHB address space. 1656 * By default there will be at least one subregion for default 1657 * 32bit DMA window. 1658 * Later the guest might want to create another DMA window 1659 * which will become another memory subregion. 1660 */ 1661 namebuf = g_strdup_printf("%s.iommu-root", sphb->dtbusname); 1662 memory_region_init(&sphb->iommu_root, OBJECT(sphb), 1663 namebuf, UINT64_MAX); 1664 g_free(namebuf); 1665 address_space_init(&sphb->iommu_as, &sphb->iommu_root, 1666 sphb->dtbusname); 1667 1668 /* 1669 * As MSI/MSIX interrupts trigger by writing at MSI/MSIX vectors, 1670 * we need to allocate some memory to catch those writes coming 1671 * from msi_notify()/msix_notify(). 1672 * As MSIMessage:addr is going to be the same and MSIMessage:data 1673 * is going to be a VIRQ number, 4 bytes of the MSI MR will only 1674 * be used. 1675 * 1676 * For KVM we want to ensure that this memory is a full page so that 1677 * our memory slot is of page size granularity. 1678 */ 1679 #ifdef CONFIG_KVM 1680 if (kvm_enabled()) { 1681 msi_window_size = getpagesize(); 1682 } 1683 #endif 1684 1685 memory_region_init_io(&sphb->msiwindow, OBJECT(sphb), &spapr_msi_ops, spapr, 1686 "msi", msi_window_size); 1687 memory_region_add_subregion(&sphb->iommu_root, SPAPR_PCI_MSI_WINDOW, 1688 &sphb->msiwindow); 1689 1690 pci_setup_iommu(bus, spapr_pci_dma_iommu, sphb); 1691 1692 pci_bus_set_route_irq_fn(bus, spapr_route_intx_pin_to_irq); 1693 1694 QLIST_INSERT_HEAD(&spapr->phbs, sphb, list); 1695 1696 /* Initialize the LSI table */ 1697 for (i = 0; i < PCI_NUM_PINS; i++) { 1698 uint32_t irq; 1699 Error *local_err = NULL; 1700 1701 irq = spapr_irq_alloc_block(spapr, 1, true, false, &local_err); 1702 if (local_err) { 1703 error_propagate(errp, local_err); 1704 error_prepend(errp, "can't allocate LSIs: "); 1705 return; 1706 } 1707 1708 sphb->lsi_table[i].irq = irq; 1709 } 1710 1711 /* allocate connectors for child PCI devices */ 1712 if (sphb->dr_enabled) { 1713 for (i = 0; i < PCI_SLOT_MAX * 8; i++) { 1714 spapr_dr_connector_new(OBJECT(phb), TYPE_SPAPR_DRC_PCI, 1715 (sphb->index << 16) | i); 1716 } 1717 } 1718 1719 /* DMA setup */ 1720 if (((sphb->page_size_mask & qemu_getrampagesize()) == 0) 1721 && kvm_enabled()) { 1722 warn_report("System page size 0x%lx is not enabled in page_size_mask " 1723 "(0x%"PRIx64"). Performance may be slow", 1724 qemu_getrampagesize(), sphb->page_size_mask); 1725 } 1726 1727 for (i = 0; i < windows_supported; ++i) { 1728 tcet = spapr_tce_new_table(DEVICE(sphb), sphb->dma_liobn[i]); 1729 if (!tcet) { 1730 error_setg(errp, "Creating window#%d failed for %s", 1731 i, sphb->dtbusname); 1732 return; 1733 } 1734 memory_region_add_subregion(&sphb->iommu_root, 0, 1735 spapr_tce_get_iommu(tcet)); 1736 } 1737 1738 sphb->msi = g_hash_table_new_full(g_int_hash, g_int_equal, g_free, g_free); 1739 } 1740 1741 static int spapr_phb_children_reset(Object *child, void *opaque) 1742 { 1743 DeviceState *dev = (DeviceState *) object_dynamic_cast(child, TYPE_DEVICE); 1744 1745 if (dev) { 1746 device_reset(dev); 1747 } 1748 1749 return 0; 1750 } 1751 1752 void spapr_phb_dma_reset(sPAPRPHBState *sphb) 1753 { 1754 int i; 1755 sPAPRTCETable *tcet; 1756 1757 for (i = 0; i < SPAPR_PCI_DMA_MAX_WINDOWS; ++i) { 1758 tcet = spapr_tce_find_by_liobn(sphb->dma_liobn[i]); 1759 1760 if (tcet && tcet->nb_table) { 1761 spapr_tce_table_disable(tcet); 1762 } 1763 } 1764 1765 /* Register default 32bit DMA window */ 1766 tcet = spapr_tce_find_by_liobn(sphb->dma_liobn[0]); 1767 spapr_tce_table_enable(tcet, SPAPR_TCE_PAGE_SHIFT, sphb->dma_win_addr, 1768 sphb->dma_win_size >> SPAPR_TCE_PAGE_SHIFT); 1769 } 1770 1771 static void spapr_phb_reset(DeviceState *qdev) 1772 { 1773 sPAPRPHBState *sphb = SPAPR_PCI_HOST_BRIDGE(qdev); 1774 1775 spapr_phb_dma_reset(sphb); 1776 1777 /* Reset the IOMMU state */ 1778 object_child_foreach(OBJECT(qdev), spapr_phb_children_reset, NULL); 1779 1780 if (spapr_phb_eeh_available(SPAPR_PCI_HOST_BRIDGE(qdev))) { 1781 spapr_phb_vfio_reset(qdev); 1782 } 1783 } 1784 1785 static Property spapr_phb_properties[] = { 1786 DEFINE_PROP_UINT32("index", sPAPRPHBState, index, -1), 1787 DEFINE_PROP_UINT64("mem_win_size", sPAPRPHBState, mem_win_size, 1788 SPAPR_PCI_MEM32_WIN_SIZE), 1789 DEFINE_PROP_UINT64("mem64_win_size", sPAPRPHBState, mem64_win_size, 1790 SPAPR_PCI_MEM64_WIN_SIZE), 1791 DEFINE_PROP_UINT64("io_win_size", sPAPRPHBState, io_win_size, 1792 SPAPR_PCI_IO_WIN_SIZE), 1793 DEFINE_PROP_BOOL("dynamic-reconfiguration", sPAPRPHBState, dr_enabled, 1794 true), 1795 /* Default DMA window is 0..1GB */ 1796 DEFINE_PROP_UINT64("dma_win_addr", sPAPRPHBState, dma_win_addr, 0), 1797 DEFINE_PROP_UINT64("dma_win_size", sPAPRPHBState, dma_win_size, 0x40000000), 1798 DEFINE_PROP_UINT64("dma64_win_addr", sPAPRPHBState, dma64_win_addr, 1799 0x800000000000000ULL), 1800 DEFINE_PROP_BOOL("ddw", sPAPRPHBState, ddw_enabled, true), 1801 DEFINE_PROP_UINT64("pgsz", sPAPRPHBState, page_size_mask, 1802 (1ULL << 12) | (1ULL << 16)), 1803 DEFINE_PROP_UINT32("numa_node", sPAPRPHBState, numa_node, -1), 1804 DEFINE_PROP_BOOL("pre-2.8-migration", sPAPRPHBState, 1805 pre_2_8_migration, false), 1806 DEFINE_PROP_BOOL("pcie-extended-configuration-space", sPAPRPHBState, 1807 pcie_ecs, true), 1808 DEFINE_PROP_END_OF_LIST(), 1809 }; 1810 1811 static const VMStateDescription vmstate_spapr_pci_lsi = { 1812 .name = "spapr_pci/lsi", 1813 .version_id = 1, 1814 .minimum_version_id = 1, 1815 .fields = (VMStateField[]) { 1816 VMSTATE_UINT32_EQUAL(irq, struct spapr_pci_lsi, NULL), 1817 1818 VMSTATE_END_OF_LIST() 1819 }, 1820 }; 1821 1822 static const VMStateDescription vmstate_spapr_pci_msi = { 1823 .name = "spapr_pci/msi", 1824 .version_id = 1, 1825 .minimum_version_id = 1, 1826 .fields = (VMStateField []) { 1827 VMSTATE_UINT32(key, spapr_pci_msi_mig), 1828 VMSTATE_UINT32(value.first_irq, spapr_pci_msi_mig), 1829 VMSTATE_UINT32(value.num, spapr_pci_msi_mig), 1830 VMSTATE_END_OF_LIST() 1831 }, 1832 }; 1833 1834 static int spapr_pci_pre_save(void *opaque) 1835 { 1836 sPAPRPHBState *sphb = opaque; 1837 GHashTableIter iter; 1838 gpointer key, value; 1839 int i; 1840 1841 if (sphb->pre_2_8_migration) { 1842 sphb->mig_liobn = sphb->dma_liobn[0]; 1843 sphb->mig_mem_win_addr = sphb->mem_win_addr; 1844 sphb->mig_mem_win_size = sphb->mem_win_size; 1845 sphb->mig_io_win_addr = sphb->io_win_addr; 1846 sphb->mig_io_win_size = sphb->io_win_size; 1847 1848 if ((sphb->mem64_win_size != 0) 1849 && (sphb->mem64_win_addr 1850 == (sphb->mem_win_addr + sphb->mem_win_size))) { 1851 sphb->mig_mem_win_size += sphb->mem64_win_size; 1852 } 1853 } 1854 1855 g_free(sphb->msi_devs); 1856 sphb->msi_devs = NULL; 1857 sphb->msi_devs_num = g_hash_table_size(sphb->msi); 1858 if (!sphb->msi_devs_num) { 1859 return 0; 1860 } 1861 sphb->msi_devs = g_malloc(sphb->msi_devs_num * sizeof(spapr_pci_msi_mig)); 1862 1863 g_hash_table_iter_init(&iter, sphb->msi); 1864 for (i = 0; g_hash_table_iter_next(&iter, &key, &value); ++i) { 1865 sphb->msi_devs[i].key = *(uint32_t *) key; 1866 sphb->msi_devs[i].value = *(spapr_pci_msi *) value; 1867 } 1868 1869 return 0; 1870 } 1871 1872 static int spapr_pci_post_load(void *opaque, int version_id) 1873 { 1874 sPAPRPHBState *sphb = opaque; 1875 gpointer key, value; 1876 int i; 1877 1878 for (i = 0; i < sphb->msi_devs_num; ++i) { 1879 key = g_memdup(&sphb->msi_devs[i].key, 1880 sizeof(sphb->msi_devs[i].key)); 1881 value = g_memdup(&sphb->msi_devs[i].value, 1882 sizeof(sphb->msi_devs[i].value)); 1883 g_hash_table_insert(sphb->msi, key, value); 1884 } 1885 g_free(sphb->msi_devs); 1886 sphb->msi_devs = NULL; 1887 sphb->msi_devs_num = 0; 1888 1889 return 0; 1890 } 1891 1892 static bool pre_2_8_migration(void *opaque, int version_id) 1893 { 1894 sPAPRPHBState *sphb = opaque; 1895 1896 return sphb->pre_2_8_migration; 1897 } 1898 1899 static const VMStateDescription vmstate_spapr_pci = { 1900 .name = "spapr_pci", 1901 .version_id = 2, 1902 .minimum_version_id = 2, 1903 .pre_save = spapr_pci_pre_save, 1904 .post_load = spapr_pci_post_load, 1905 .fields = (VMStateField[]) { 1906 VMSTATE_UINT64_EQUAL(buid, sPAPRPHBState, NULL), 1907 VMSTATE_UINT32_TEST(mig_liobn, sPAPRPHBState, pre_2_8_migration), 1908 VMSTATE_UINT64_TEST(mig_mem_win_addr, sPAPRPHBState, pre_2_8_migration), 1909 VMSTATE_UINT64_TEST(mig_mem_win_size, sPAPRPHBState, pre_2_8_migration), 1910 VMSTATE_UINT64_TEST(mig_io_win_addr, sPAPRPHBState, pre_2_8_migration), 1911 VMSTATE_UINT64_TEST(mig_io_win_size, sPAPRPHBState, pre_2_8_migration), 1912 VMSTATE_STRUCT_ARRAY(lsi_table, sPAPRPHBState, PCI_NUM_PINS, 0, 1913 vmstate_spapr_pci_lsi, struct spapr_pci_lsi), 1914 VMSTATE_INT32(msi_devs_num, sPAPRPHBState), 1915 VMSTATE_STRUCT_VARRAY_ALLOC(msi_devs, sPAPRPHBState, msi_devs_num, 0, 1916 vmstate_spapr_pci_msi, spapr_pci_msi_mig), 1917 VMSTATE_END_OF_LIST() 1918 }, 1919 }; 1920 1921 static const char *spapr_phb_root_bus_path(PCIHostState *host_bridge, 1922 PCIBus *rootbus) 1923 { 1924 sPAPRPHBState *sphb = SPAPR_PCI_HOST_BRIDGE(host_bridge); 1925 1926 return sphb->dtbusname; 1927 } 1928 1929 static void spapr_phb_class_init(ObjectClass *klass, void *data) 1930 { 1931 PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(klass); 1932 DeviceClass *dc = DEVICE_CLASS(klass); 1933 HotplugHandlerClass *hp = HOTPLUG_HANDLER_CLASS(klass); 1934 1935 hc->root_bus_path = spapr_phb_root_bus_path; 1936 dc->realize = spapr_phb_realize; 1937 dc->props = spapr_phb_properties; 1938 dc->reset = spapr_phb_reset; 1939 dc->vmsd = &vmstate_spapr_pci; 1940 /* Supported by TYPE_SPAPR_MACHINE */ 1941 dc->user_creatable = true; 1942 set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); 1943 hp->plug = spapr_pci_plug; 1944 hp->unplug_request = spapr_pci_unplug_request; 1945 } 1946 1947 static const TypeInfo spapr_phb_info = { 1948 .name = TYPE_SPAPR_PCI_HOST_BRIDGE, 1949 .parent = TYPE_PCI_HOST_BRIDGE, 1950 .instance_size = sizeof(sPAPRPHBState), 1951 .class_init = spapr_phb_class_init, 1952 .interfaces = (InterfaceInfo[]) { 1953 { TYPE_HOTPLUG_HANDLER }, 1954 { } 1955 } 1956 }; 1957 1958 PCIHostState *spapr_create_phb(sPAPRMachineState *spapr, int index) 1959 { 1960 DeviceState *dev; 1961 1962 dev = qdev_create(NULL, TYPE_SPAPR_PCI_HOST_BRIDGE); 1963 qdev_prop_set_uint32(dev, "index", index); 1964 qdev_init_nofail(dev); 1965 1966 return PCI_HOST_BRIDGE(dev); 1967 } 1968 1969 typedef struct sPAPRFDT { 1970 void *fdt; 1971 int node_off; 1972 sPAPRPHBState *sphb; 1973 } sPAPRFDT; 1974 1975 static void spapr_populate_pci_devices_dt(PCIBus *bus, PCIDevice *pdev, 1976 void *opaque) 1977 { 1978 PCIBus *sec_bus; 1979 sPAPRFDT *p = opaque; 1980 int offset; 1981 sPAPRFDT s_fdt; 1982 1983 offset = spapr_create_pci_child_dt(p->sphb, pdev, p->fdt, p->node_off); 1984 if (!offset) { 1985 error_report("Failed to create pci child device tree node"); 1986 return; 1987 } 1988 1989 if ((pci_default_read_config(pdev, PCI_HEADER_TYPE, 1) != 1990 PCI_HEADER_TYPE_BRIDGE)) { 1991 return; 1992 } 1993 1994 sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(pdev)); 1995 if (!sec_bus) { 1996 return; 1997 } 1998 1999 s_fdt.fdt = p->fdt; 2000 s_fdt.node_off = offset; 2001 s_fdt.sphb = p->sphb; 2002 pci_for_each_device_reverse(sec_bus, pci_bus_num(sec_bus), 2003 spapr_populate_pci_devices_dt, 2004 &s_fdt); 2005 } 2006 2007 static void spapr_phb_pci_enumerate_bridge(PCIBus *bus, PCIDevice *pdev, 2008 void *opaque) 2009 { 2010 unsigned int *bus_no = opaque; 2011 unsigned int primary = *bus_no; 2012 unsigned int subordinate = 0xff; 2013 PCIBus *sec_bus = NULL; 2014 2015 if ((pci_default_read_config(pdev, PCI_HEADER_TYPE, 1) != 2016 PCI_HEADER_TYPE_BRIDGE)) { 2017 return; 2018 } 2019 2020 (*bus_no)++; 2021 pci_default_write_config(pdev, PCI_PRIMARY_BUS, primary, 1); 2022 pci_default_write_config(pdev, PCI_SECONDARY_BUS, *bus_no, 1); 2023 pci_default_write_config(pdev, PCI_SUBORDINATE_BUS, *bus_no, 1); 2024 2025 sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(pdev)); 2026 if (!sec_bus) { 2027 return; 2028 } 2029 2030 pci_default_write_config(pdev, PCI_SUBORDINATE_BUS, subordinate, 1); 2031 pci_for_each_device(sec_bus, pci_bus_num(sec_bus), 2032 spapr_phb_pci_enumerate_bridge, bus_no); 2033 pci_default_write_config(pdev, PCI_SUBORDINATE_BUS, *bus_no, 1); 2034 } 2035 2036 static void spapr_phb_pci_enumerate(sPAPRPHBState *phb) 2037 { 2038 PCIBus *bus = PCI_HOST_BRIDGE(phb)->bus; 2039 unsigned int bus_no = 0; 2040 2041 pci_for_each_device(bus, pci_bus_num(bus), 2042 spapr_phb_pci_enumerate_bridge, 2043 &bus_no); 2044 2045 } 2046 2047 int spapr_populate_pci_dt(sPAPRPHBState *phb, 2048 uint32_t xics_phandle, 2049 void *fdt) 2050 { 2051 int bus_off, i, j, ret; 2052 gchar *nodename; 2053 uint32_t bus_range[] = { cpu_to_be32(0), cpu_to_be32(0xff) }; 2054 struct { 2055 uint32_t hi; 2056 uint64_t child; 2057 uint64_t parent; 2058 uint64_t size; 2059 } QEMU_PACKED ranges[] = { 2060 { 2061 cpu_to_be32(b_ss(1)), cpu_to_be64(0), 2062 cpu_to_be64(phb->io_win_addr), 2063 cpu_to_be64(memory_region_size(&phb->iospace)), 2064 }, 2065 { 2066 cpu_to_be32(b_ss(2)), cpu_to_be64(SPAPR_PCI_MEM_WIN_BUS_OFFSET), 2067 cpu_to_be64(phb->mem_win_addr), 2068 cpu_to_be64(phb->mem_win_size), 2069 }, 2070 { 2071 cpu_to_be32(b_ss(3)), cpu_to_be64(phb->mem64_win_pciaddr), 2072 cpu_to_be64(phb->mem64_win_addr), 2073 cpu_to_be64(phb->mem64_win_size), 2074 }, 2075 }; 2076 const unsigned sizeof_ranges = 2077 (phb->mem64_win_size ? 3 : 2) * sizeof(ranges[0]); 2078 uint64_t bus_reg[] = { cpu_to_be64(phb->buid), 0 }; 2079 uint32_t interrupt_map_mask[] = { 2080 cpu_to_be32(b_ddddd(-1)|b_fff(0)), 0x0, 0x0, cpu_to_be32(-1)}; 2081 uint32_t interrupt_map[PCI_SLOT_MAX * PCI_NUM_PINS][7]; 2082 uint32_t ddw_applicable[] = { 2083 cpu_to_be32(RTAS_IBM_QUERY_PE_DMA_WINDOW), 2084 cpu_to_be32(RTAS_IBM_CREATE_PE_DMA_WINDOW), 2085 cpu_to_be32(RTAS_IBM_REMOVE_PE_DMA_WINDOW) 2086 }; 2087 uint32_t ddw_extensions[] = { 2088 cpu_to_be32(1), 2089 cpu_to_be32(RTAS_IBM_RESET_PE_DMA_WINDOW) 2090 }; 2091 uint32_t associativity[] = {cpu_to_be32(0x4), 2092 cpu_to_be32(0x0), 2093 cpu_to_be32(0x0), 2094 cpu_to_be32(0x0), 2095 cpu_to_be32(phb->numa_node)}; 2096 sPAPRTCETable *tcet; 2097 PCIBus *bus = PCI_HOST_BRIDGE(phb)->bus; 2098 sPAPRFDT s_fdt; 2099 2100 /* Start populating the FDT */ 2101 nodename = g_strdup_printf("pci@%" PRIx64, phb->buid); 2102 _FDT(bus_off = fdt_add_subnode(fdt, 0, nodename)); 2103 g_free(nodename); 2104 2105 /* Write PHB properties */ 2106 _FDT(fdt_setprop_string(fdt, bus_off, "device_type", "pci")); 2107 _FDT(fdt_setprop_string(fdt, bus_off, "compatible", "IBM,Logical_PHB")); 2108 _FDT(fdt_setprop_cell(fdt, bus_off, "#address-cells", 0x3)); 2109 _FDT(fdt_setprop_cell(fdt, bus_off, "#size-cells", 0x2)); 2110 _FDT(fdt_setprop_cell(fdt, bus_off, "#interrupt-cells", 0x1)); 2111 _FDT(fdt_setprop(fdt, bus_off, "used-by-rtas", NULL, 0)); 2112 _FDT(fdt_setprop(fdt, bus_off, "bus-range", &bus_range, sizeof(bus_range))); 2113 _FDT(fdt_setprop(fdt, bus_off, "ranges", &ranges, sizeof_ranges)); 2114 _FDT(fdt_setprop(fdt, bus_off, "reg", &bus_reg, sizeof(bus_reg))); 2115 _FDT(fdt_setprop_cell(fdt, bus_off, "ibm,pci-config-space-type", 0x1)); 2116 _FDT(fdt_setprop_cell(fdt, bus_off, "ibm,pe-total-#msi", XICS_IRQS_SPAPR)); 2117 2118 /* Dynamic DMA window */ 2119 if (phb->ddw_enabled) { 2120 _FDT(fdt_setprop(fdt, bus_off, "ibm,ddw-applicable", &ddw_applicable, 2121 sizeof(ddw_applicable))); 2122 _FDT(fdt_setprop(fdt, bus_off, "ibm,ddw-extensions", 2123 &ddw_extensions, sizeof(ddw_extensions))); 2124 } 2125 2126 /* Advertise NUMA via ibm,associativity */ 2127 if (phb->numa_node != -1) { 2128 _FDT(fdt_setprop(fdt, bus_off, "ibm,associativity", associativity, 2129 sizeof(associativity))); 2130 } 2131 2132 /* Build the interrupt-map, this must matches what is done 2133 * in pci_spapr_map_irq 2134 */ 2135 _FDT(fdt_setprop(fdt, bus_off, "interrupt-map-mask", 2136 &interrupt_map_mask, sizeof(interrupt_map_mask))); 2137 for (i = 0; i < PCI_SLOT_MAX; i++) { 2138 for (j = 0; j < PCI_NUM_PINS; j++) { 2139 uint32_t *irqmap = interrupt_map[i*PCI_NUM_PINS + j]; 2140 int lsi_num = pci_spapr_swizzle(i, j); 2141 2142 irqmap[0] = cpu_to_be32(b_ddddd(i)|b_fff(0)); 2143 irqmap[1] = 0; 2144 irqmap[2] = 0; 2145 irqmap[3] = cpu_to_be32(j+1); 2146 irqmap[4] = cpu_to_be32(xics_phandle); 2147 spapr_dt_xics_irq(&irqmap[5], phb->lsi_table[lsi_num].irq, true); 2148 } 2149 } 2150 /* Write interrupt map */ 2151 _FDT(fdt_setprop(fdt, bus_off, "interrupt-map", &interrupt_map, 2152 sizeof(interrupt_map))); 2153 2154 tcet = spapr_tce_find_by_liobn(phb->dma_liobn[0]); 2155 if (!tcet) { 2156 return -1; 2157 } 2158 spapr_dma_dt(fdt, bus_off, "ibm,dma-window", 2159 tcet->liobn, tcet->bus_offset, 2160 tcet->nb_table << tcet->page_shift); 2161 2162 /* Walk the bridges and program the bus numbers*/ 2163 spapr_phb_pci_enumerate(phb); 2164 _FDT(fdt_setprop_cell(fdt, bus_off, "qemu,phb-enumerated", 0x1)); 2165 2166 /* Populate tree nodes with PCI devices attached */ 2167 s_fdt.fdt = fdt; 2168 s_fdt.node_off = bus_off; 2169 s_fdt.sphb = phb; 2170 pci_for_each_device_reverse(bus, pci_bus_num(bus), 2171 spapr_populate_pci_devices_dt, 2172 &s_fdt); 2173 2174 ret = spapr_drc_populate_dt(fdt, bus_off, OBJECT(phb), 2175 SPAPR_DR_CONNECTOR_TYPE_PCI); 2176 if (ret) { 2177 return ret; 2178 } 2179 2180 return 0; 2181 } 2182 2183 void spapr_pci_rtas_init(void) 2184 { 2185 spapr_rtas_register(RTAS_READ_PCI_CONFIG, "read-pci-config", 2186 rtas_read_pci_config); 2187 spapr_rtas_register(RTAS_WRITE_PCI_CONFIG, "write-pci-config", 2188 rtas_write_pci_config); 2189 spapr_rtas_register(RTAS_IBM_READ_PCI_CONFIG, "ibm,read-pci-config", 2190 rtas_ibm_read_pci_config); 2191 spapr_rtas_register(RTAS_IBM_WRITE_PCI_CONFIG, "ibm,write-pci-config", 2192 rtas_ibm_write_pci_config); 2193 if (msi_nonbroken) { 2194 spapr_rtas_register(RTAS_IBM_QUERY_INTERRUPT_SOURCE_NUMBER, 2195 "ibm,query-interrupt-source-number", 2196 rtas_ibm_query_interrupt_source_number); 2197 spapr_rtas_register(RTAS_IBM_CHANGE_MSI, "ibm,change-msi", 2198 rtas_ibm_change_msi); 2199 } 2200 2201 spapr_rtas_register(RTAS_IBM_SET_EEH_OPTION, 2202 "ibm,set-eeh-option", 2203 rtas_ibm_set_eeh_option); 2204 spapr_rtas_register(RTAS_IBM_GET_CONFIG_ADDR_INFO2, 2205 "ibm,get-config-addr-info2", 2206 rtas_ibm_get_config_addr_info2); 2207 spapr_rtas_register(RTAS_IBM_READ_SLOT_RESET_STATE2, 2208 "ibm,read-slot-reset-state2", 2209 rtas_ibm_read_slot_reset_state2); 2210 spapr_rtas_register(RTAS_IBM_SET_SLOT_RESET, 2211 "ibm,set-slot-reset", 2212 rtas_ibm_set_slot_reset); 2213 spapr_rtas_register(RTAS_IBM_CONFIGURE_PE, 2214 "ibm,configure-pe", 2215 rtas_ibm_configure_pe); 2216 spapr_rtas_register(RTAS_IBM_SLOT_ERROR_DETAIL, 2217 "ibm,slot-error-detail", 2218 rtas_ibm_slot_error_detail); 2219 } 2220 2221 static void spapr_pci_register_types(void) 2222 { 2223 type_register_static(&spapr_phb_info); 2224 } 2225 2226 type_init(spapr_pci_register_types) 2227 2228 static int spapr_switch_one_vga(DeviceState *dev, void *opaque) 2229 { 2230 bool be = *(bool *)opaque; 2231 2232 if (object_dynamic_cast(OBJECT(dev), "VGA") 2233 || object_dynamic_cast(OBJECT(dev), "secondary-vga")) { 2234 object_property_set_bool(OBJECT(dev), be, "big-endian-framebuffer", 2235 &error_abort); 2236 } 2237 return 0; 2238 } 2239 2240 void spapr_pci_switch_vga(bool big_endian) 2241 { 2242 sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine()); 2243 sPAPRPHBState *sphb; 2244 2245 /* 2246 * For backward compatibility with existing guests, we switch 2247 * the endianness of the VGA controller when changing the guest 2248 * interrupt mode 2249 */ 2250 QLIST_FOREACH(sphb, &spapr->phbs, list) { 2251 BusState *bus = &PCI_HOST_BRIDGE(sphb)->bus->qbus; 2252 qbus_walk_children(bus, spapr_switch_one_vga, NULL, NULL, NULL, 2253 &big_endian); 2254 } 2255 } 2256