// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file implements the platform-dependent EEH operations for pseries.
 * The pseries platform is built heavily on RTAS, so the platform-dependent
 * EEH operations are implemented as RTAS calls. The functions are derived
 * from arch/powerpc/platforms/pseries/eeh.c and the necessary cleanup has
 * been done.
 *
 * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2011.
 * Copyright IBM Corporation 2001, 2005, 2006
 * Copyright Dave Engebretsen & Todd Inglett 2001
 * Copyright Linas Vepstas 2005, 2006
 */

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>

#include <asm/eeh.h>
#include <asm/eeh_event.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include <asm/rtas.h>

/* RTAS tokens */
static int ibm_set_eeh_option;
static int ibm_set_slot_reset;
static int ibm_read_slot_reset_state;
static int ibm_read_slot_reset_state2;
static int ibm_slot_error_detail;
static int ibm_get_config_addr_info;
static int ibm_get_config_addr_info2;
static int ibm_configure_pe;

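/*
 * All EEH operations below use rtas_call(): the token identifies the
 * RTAS service, nargs/nret give the input and return counts, the
 * returned status word is zero on success, negative on error and
 * positive for busy/retry hints, and any further return values are
 * copied into the supplied output array. A minimal sketch of the
 * pattern, as used throughout this file:
 *
 *	int rets[3];
 *	int ret;
 *
 *	ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets,
 *			config_addr, BUID_HI(buid), BUID_LO(buid), 0);
 *	if (ret)
 *		return 0;	-- call failed, rets[] is not valid
 */
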
void pseries_pcibios_bus_add_device(struct pci_dev *pdev)
{
	struct pci_dn *pdn = pci_get_pdn(pdev);

	if (eeh_has_flag(EEH_FORCE_DISABLED))
		return;

	dev_dbg(&pdev->dev, "EEH: Setting up device\n");
#ifdef CONFIG_PCI_IOV
	if (pdev->is_virtfn) {
		struct pci_dn *physfn_pdn;

		pdn->device_id = pdev->device;
		pdn->vendor_id = pdev->vendor;
		pdn->class_code = pdev->class;
		/*
		 * The last allow-unfreeze return code is kept for
		 * retrieval by user space through eeh-sysfs, to show
		 * the last command completion from the platform.
		 */
		pdn->last_allow_rc = 0;
		physfn_pdn = pci_get_pdn(pdev->physfn);
		pdn->pe_number = physfn_pdn->pe_num_map[pdn->vf_index];
	}
#endif
	pseries_eeh_init_edev(pdn);
#ifdef CONFIG_PCI_IOV
	if (pdev->is_virtfn) {
		struct eeh_dev *edev = pdn_to_eeh_dev(pdn);

		edev->pe_config_addr = (pdn->busno << 16) | (pdn->devfn << 8);
		eeh_rmv_from_parent_pe(edev); /* Remove as it is added to the bus PE */
		eeh_add_to_parent_pe(edev);   /* Add as VF PE type */
	}
#endif
	eeh_probe_device(pdev);
}

/*
 * Buffer for reporting slot-error-detail RTAS calls. It's here
 * in BSS, and not dynamically allocated, so that it ends up in
 * the RMO where RTAS can access it.
 */
static unsigned char slot_errbuf[RTAS_ERROR_LOG_MAX];
static DEFINE_SPINLOCK(slot_errbuf_lock);
static int eeh_error_buf_size;

/**
 * pseries_eeh_init - EEH platform dependent initialization
 *
 * EEH platform dependent initialization on pseries.
 */
static int pseries_eeh_init(void)
{
	/* figure out EEH RTAS function call tokens */
	ibm_set_eeh_option		= rtas_token("ibm,set-eeh-option");
	ibm_set_slot_reset		= rtas_token("ibm,set-slot-reset");
	ibm_read_slot_reset_state2	= rtas_token("ibm,read-slot-reset-state2");
	ibm_read_slot_reset_state	= rtas_token("ibm,read-slot-reset-state");
	ibm_slot_error_detail		= rtas_token("ibm,slot-error-detail");
	ibm_get_config_addr_info2	= rtas_token("ibm,get-config-addr-info2");
	ibm_get_config_addr_info	= rtas_token("ibm,get-config-addr-info");
	ibm_configure_pe		= rtas_token("ibm,configure-pe");

	/*
	 * ibm,configure-pe and ibm,configure-bridge have the same semantics,
	 * however ibm,configure-pe can be faster. If we can't find
	 * ibm,configure-pe then fall back to using ibm,configure-bridge.
	 */
	if (ibm_configure_pe == RTAS_UNKNOWN_SERVICE)
		ibm_configure_pe = rtas_token("ibm,configure-bridge");

	/*
	 * Necessary sanity check. We needn't check "get-config-addr-info"
	 * and its variant since old firmware probably supports addressing
	 * by domain/bus/slot/function for EEH RTAS operations.
	 */
	if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE ||
	    ibm_set_slot_reset == RTAS_UNKNOWN_SERVICE ||
	    (ibm_read_slot_reset_state2 == RTAS_UNKNOWN_SERVICE &&
	     ibm_read_slot_reset_state == RTAS_UNKNOWN_SERVICE) ||
	    ibm_slot_error_detail == RTAS_UNKNOWN_SERVICE ||
	    ibm_configure_pe == RTAS_UNKNOWN_SERVICE) {
		pr_info("EEH functionality not supported\n");
		return -EINVAL;
	}

	/* Initialize error log lock and size */
	spin_lock_init(&slot_errbuf_lock);
	eeh_error_buf_size = rtas_token("rtas-error-log-max");
	if (eeh_error_buf_size == RTAS_UNKNOWN_SERVICE) {
		pr_info("%s: unknown EEH error log size\n",
			__func__);
		eeh_error_buf_size = 1024;
	} else if (eeh_error_buf_size > RTAS_ERROR_LOG_MAX) {
		pr_info("%s: EEH error log size %d exceeds the maximal %d\n",
			__func__, eeh_error_buf_size, RTAS_ERROR_LOG_MAX);
		eeh_error_buf_size = RTAS_ERROR_LOG_MAX;
	}

	/* Set EEH probe mode */
	eeh_add_flag(EEH_PROBE_MODE_DEVTREE | EEH_ENABLE_IO_FOR_LOG);

	/* Set EEH machine dependent code */
	ppc_md.pcibios_bus_add_device = pseries_pcibios_bus_add_device;

	return 0;
}

/* Return the offset of the capability list pointer, or 0 if capabilities
 * are not supported */
static int pseries_eeh_cap_start(struct pci_dn *pdn)
{
	u32 status;

	if (!pdn)
		return 0;

	rtas_read_config(pdn, PCI_STATUS, 2, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	return PCI_CAPABILITY_LIST;
}

/* Walk the standard capability list via RTAS config reads and return the
 * offset of capability @cap, or 0 if it wasn't found */
static int pseries_eeh_find_cap(struct pci_dn *pdn, int cap)
{
	int pos = pseries_eeh_cap_start(pdn);
	int cnt = 48;	/* Maximal number of capabilities */
	u32 id;

	if (!pos)
		return 0;

	while (cnt--) {
		rtas_read_config(pdn, pos, 1, &pos);
		if (pos < 0x40)
			break;
		pos &= ~3;
		rtas_read_config(pdn, pos + PCI_CAP_LIST_ID, 1, &id);
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos += PCI_CAP_LIST_NEXT;
	}

	return 0;
}

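/**
 * pseries_eeh_find_ecap - Find extended capability in config space
 * @pdn: PCI device node
 * @cap: extended capability ID
 *
 * Walk the PCIe extended config space from offset 256 via RTAS config
 * reads and return the offset of the requested extended capability, or
 * zero if it's absent or the device has no extended config space.
 */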
static int pseries_eeh_find_ecap(struct pci_dn *pdn, int cap)
{
	struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
	u32 header;
	int pos = 256;
	int ttl = (4096 - 256) / 8;

	if (!edev || !edev->pcie_cap)
		return 0;
	if (rtas_read_config(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL)
		return 0;
	else if (!header)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap && pos)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < 256)
			break;

		if (rtas_read_config(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}

/**
 * pseries_eeh_init_edev - initialise the eeh_dev and eeh_pe for a pci_dn
 *
 * @pdn: PCI device node
 *
 * When we discover a new PCI device via the device-tree we create a
 * corresponding pci_dn and we allocate, but don't initialise, an eeh_dev.
 * This function takes care of the initialisation and inserts the eeh_dev
 * into the correct eeh_pe. If no eeh_pe exists we'll allocate one.
 */
void pseries_eeh_init_edev(struct pci_dn *pdn)
{
	struct eeh_dev *edev;
	struct eeh_pe pe;
	u32 pcie_flags;
	int enable = 0;
	int ret;

	if (WARN_ON_ONCE(!eeh_has_flag(EEH_PROBE_MODE_DEVTREE)))
		return;

	/*
	 * Find the eeh_dev for this pdn. The storage for the eeh_dev was
	 * allocated at the same time as the pci_dn.
	 *
	 * XXX: We should probably re-visit that.
	 */
	edev = pdn_to_eeh_dev(pdn);
	if (!edev)
		return;

	/*
	 * If ->pe is set then we've already probed this device. We hit
	 * this path when a pci_dev is removed and rescanned while recovering
	 * a PE (i.e. for devices where the driver doesn't support error
	 * recovery).
	 */
	if (edev->pe)
		return;

	/* Check class/vendor/device IDs */
	if (!pdn->vendor_id || !pdn->device_id || !pdn->class_code)
		return;

	/* Skip for PCI-ISA bridge */
	if ((pdn->class_code >> 8) == PCI_CLASS_BRIDGE_ISA)
		return;

	eeh_edev_dbg(edev, "Probing device\n");

	/*
	 * Update the class code and mode of the eeh device. The mode
	 * needs to correctly reflect whether the current device is a
	 * root port or a PCIe switch downstream port.
	 */
	edev->class_code = pdn->class_code;
	edev->pcix_cap = pseries_eeh_find_cap(pdn, PCI_CAP_ID_PCIX);
	edev->pcie_cap = pseries_eeh_find_cap(pdn, PCI_CAP_ID_EXP);
	edev->aer_cap = pseries_eeh_find_ecap(pdn, PCI_EXT_CAP_ID_ERR);
	edev->mode &= 0xFFFFFF00;
	if ((edev->class_code >> 8) == PCI_CLASS_BRIDGE_PCI) {
		edev->mode |= EEH_DEV_BRIDGE;
		if (edev->pcie_cap) {
			rtas_read_config(pdn, edev->pcie_cap + PCI_EXP_FLAGS,
					 2, &pcie_flags);
			pcie_flags = (pcie_flags & PCI_EXP_FLAGS_TYPE) >> 4;
			if (pcie_flags == PCI_EXP_TYPE_ROOT_PORT)
				edev->mode |= EEH_DEV_ROOT_PORT;
			else if (pcie_flags == PCI_EXP_TYPE_DOWNSTREAM)
				edev->mode |= EEH_DEV_DS_PORT;
		}
	}

	/* Initialize the fake PE */
	memset(&pe, 0, sizeof(struct eeh_pe));
	pe.phb = pdn->phb;
	pe.config_addr = (pdn->busno << 16) | (pdn->devfn << 8);

	/* Enable EEH on the device */
	eeh_edev_dbg(edev, "Enabling EEH on device\n");
	ret = eeh_ops->set_option(&pe, EEH_OPT_ENABLE);
	if (ret) {
		eeh_edev_dbg(edev, "EEH failed to enable on device (code %d)\n", ret);
	} else {
		/* Retrieve PE address */
		edev->pe_config_addr = eeh_ops->get_pe_addr(&pe);
		pe.addr = edev->pe_config_addr;

		/*
		 * Some older systems (Power4) allow the ibm,set-eeh-option
		 * call to succeed even on nodes where EEH is not supported.
		 * Verify support explicitly.
		 */
		ret = eeh_ops->get_state(&pe, NULL);
		if (ret > 0 && ret != EEH_STATE_NOT_SUPPORT)
			enable = 1;

		if (enable) {
			eeh_add_flag(EEH_ENABLED);
			eeh_add_to_parent_pe(edev);
		} else if (pdn->parent && pdn_to_eeh_dev(pdn->parent) &&
			   (pdn_to_eeh_dev(pdn->parent))->pe) {
			/*
			 * This device doesn't support EEH, but it may have
			 * an EEH parent, in which case we mark it as
			 * supported.
			 */
			edev->pe_config_addr = pdn_to_eeh_dev(pdn->parent)->pe_config_addr;
			eeh_add_to_parent_pe(edev);
		}
		eeh_edev_dbg(edev, "EEH is %s on device (code %d)\n",
			     (enable ? "enabled" : "unsupported"), ret);
	}

	/* Save memory bars */
	eeh_save_bars(edev);
}

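/**
 * pseries_eeh_probe - Probe the indicated device for EEH support
 * @pdev: PCI device
 *
 * Return the eeh_dev that pseries_eeh_init_edev() set up for the
 * device, or NULL if the device isn't EEH capable (i.e. it was never
 * inserted into a PE).
 */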
static struct eeh_dev *pseries_eeh_probe(struct pci_dev *pdev)
{
	struct eeh_dev *edev;
	struct pci_dn *pdn;

	pdn = pci_get_pdn_by_devfn(pdev->bus, pdev->devfn);
	if (!pdn)
		return NULL;

	/*
	 * If the system supports EEH on this device then the eeh_dev was
	 * configured and inserted into a PE in pseries_eeh_init_edev()
	 */
	edev = pdn_to_eeh_dev(pdn);
	if (!edev || !edev->pe)
		return NULL;

	return edev;
}

/**
 * pseries_eeh_init_edev_recursive - Enable EEH for the indicated device
 * @pdn: PCI device node
 *
 * This routine must be used to perform EEH initialization for the
 * indicated PCI device that was added after system boot (e.g.
 * hotplug, dlpar).
 */
void pseries_eeh_init_edev_recursive(struct pci_dn *pdn)
{
	struct pci_dn *n;

	if (!pdn)
		return;

	list_for_each_entry(n, &pdn->child_list, list)
		pseries_eeh_init_edev_recursive(n);

	pseries_eeh_init_edev(pdn);
}
EXPORT_SYMBOL_GPL(pseries_eeh_init_edev_recursive);

/**
 * pseries_eeh_set_option - Initialize EEH or MMIO/DMA reenable
 * @pe: EEH PE
 * @option: operation to be issued
 *
 * The function is used to control the EEH functionality globally.
 * Currently, the following options are supported according to PAPR:
 * Enable EEH, Disable EEH, Enable MMIO and Enable DMA
 */
static int pseries_eeh_set_option(struct eeh_pe *pe, int option)
{
	int ret = 0;
	int config_addr;

	/*
	 * When we're enabling or disabling EEH functionality on
	 * the particular PE, the PE config address is possibly
	 * unavailable. Therefore, we have to figure it out from
	 * the FDT node.
	 */
	switch (option) {
	case EEH_OPT_DISABLE:
	case EEH_OPT_ENABLE:
	case EEH_OPT_THAW_MMIO:
	case EEH_OPT_THAW_DMA:
		config_addr = pe->config_addr;
		if (pe->addr)
			config_addr = pe->addr;
		break;
	case EEH_OPT_FREEZE_PE:
		/* Not supported */
		return 0;
	default:
		pr_err("%s: Invalid option %d\n", __func__, option);
		return -EINVAL;
	}

	ret = rtas_call(ibm_set_eeh_option, 4, 1, NULL,
			config_addr, BUID_HI(pe->phb->buid),
			BUID_LO(pe->phb->buid), option);

	return ret;
}

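/*
 * Note that the option value is passed straight through as the
 * "function" argument of ibm,set-eeh-option; the EEH_OPT_* encodings
 * are assumed to match the PAPR-defined values (0 = disable,
 * 1 = enable, 2 = thaw MMIO, 3 = thaw DMA).
 */
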
/**
 * pseries_eeh_get_pe_addr - Retrieve PE address
 * @pe: EEH PE
 *
 * Retrieve the associated PE address. There are actually two RTAS
 * calls dedicated to this purpose: we try the newer one first and
 * then fall back to the old one. Also, make sure the config address
 * has been figured out from the FDT node before calling the function.
 *
 * It's notable that a zeroed return value means an invalid PE config
 * address.
 */
static int pseries_eeh_get_pe_addr(struct eeh_pe *pe)
{
	int ret = 0;
	int rets[3];

	if (ibm_get_config_addr_info2 != RTAS_UNKNOWN_SERVICE) {
		/*
		 * First of all, we need to make sure there is one PE
		 * associated with the device. Otherwise, the PE address
		 * is meaningless.
		 */
		ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets,
				pe->config_addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid), 1);
		if (ret || (rets[0] == 0))
			return 0;

		/* Retrieve the associated PE config address */
		ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets,
				pe->config_addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid), 0);
		if (ret) {
			pr_warn("%s: Failed to get address for PHB#%x-PE#%x\n",
				__func__, pe->phb->global_number, pe->config_addr);
			return 0;
		}

		return rets[0];
	}

	if (ibm_get_config_addr_info != RTAS_UNKNOWN_SERVICE) {
		ret = rtas_call(ibm_get_config_addr_info, 4, 2, rets,
				pe->config_addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid), 0);
		if (ret) {
			pr_warn("%s: Failed to get address for PHB#%x-PE#%x\n",
				__func__, pe->phb->global_number, pe->config_addr);
			return 0;
		}

		return rets[0];
	}

	return ret;
}

/**
 * pseries_eeh_get_state - Retrieve PE state
 * @pe: EEH PE
 * @delay: suggested time to wait if state is unavailable
 *
 * Retrieve the state of the specified PE. On an RTAS compliant
 * pseries platform there is a dedicated RTAS function for this
 * purpose. Note that the associated PE config address might already
 * be available when this is called, so endeavour to use the PE config
 * address if possible. Furthermore, there are two RTAS calls for the
 * purpose: we try the new one and fall back to the old one if the new
 * one doesn't work properly.
 */
static int pseries_eeh_get_state(struct eeh_pe *pe, int *delay)
{
	int config_addr;
	int ret;
	int rets[4];
	int result;

	/* Figure out PE config address if possible */
	config_addr = pe->config_addr;
	if (pe->addr)
		config_addr = pe->addr;

	if (ibm_read_slot_reset_state2 != RTAS_UNKNOWN_SERVICE) {
		ret = rtas_call(ibm_read_slot_reset_state2, 3, 4, rets,
				config_addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid));
	} else if (ibm_read_slot_reset_state != RTAS_UNKNOWN_SERVICE) {
		/* Fake PE unavailable info */
		rets[2] = 0;
		ret = rtas_call(ibm_read_slot_reset_state, 3, 3, rets,
				config_addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid));
	} else {
		return EEH_STATE_NOT_SUPPORT;
	}

	if (ret)
		return ret;

	/* Parse the result out */
	if (!rets[1])
		return EEH_STATE_NOT_SUPPORT;

	switch (rets[0]) {
	case 0:
		result = EEH_STATE_MMIO_ACTIVE |
			 EEH_STATE_DMA_ACTIVE;
		break;
	case 1:
		result = EEH_STATE_RESET_ACTIVE |
			 EEH_STATE_MMIO_ACTIVE |
			 EEH_STATE_DMA_ACTIVE;
		break;
	case 2:
		result = 0;
		break;
	case 4:
		result = EEH_STATE_MMIO_ENABLED;
		break;
	case 5:
		if (rets[2]) {
			if (delay)
				*delay = rets[2];
			result = EEH_STATE_UNAVAILABLE;
		} else {
			result = EEH_STATE_NOT_SUPPORT;
		}
		break;
	default:
		result = EEH_STATE_NOT_SUPPORT;
	}

	return result;
}

/**
 * pseries_eeh_reset - Reset the specified PE
 * @pe: EEH PE
 * @option: reset option
 *
 * Reset the specified PE
 */
static int pseries_eeh_reset(struct eeh_pe *pe, int option)
{
	int config_addr;
	int ret;

	/* Figure out PE address */
	config_addr = pe->config_addr;
	if (pe->addr)
		config_addr = pe->addr;

	/* Reset PE through RTAS call */
	ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
			config_addr, BUID_HI(pe->phb->buid),
			BUID_LO(pe->phb->buid), option);

	/* If fundamental-reset not supported, try hot-reset */
	if (option == EEH_RESET_FUNDAMENTAL &&
	    ret == -8) {
		option = EEH_RESET_HOT;
		ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
				config_addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid), option);
	}

	/* We need reset hold or settlement delay */
	if (option == EEH_RESET_FUNDAMENTAL ||
	    option == EEH_RESET_HOT)
		msleep(EEH_PE_RST_HOLD_TIME);
	else
		msleep(EEH_PE_RST_SETTLE_TIME);

	return ret;
}

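/*
 * Likewise, the reset option is passed through to ibm,set-slot-reset
 * unchanged, so the EEH_RESET_* encodings (deactivate, hot reset,
 * fundamental reset) are assumed to match the PAPR-defined function
 * values for that call.
 */
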
/**
 * pseries_eeh_get_log - Retrieve error log
 * @pe: EEH PE
 * @severity: temporary or permanent error log
 * @drv_log: driver log to be combined with retrieved error log
 * @len: length of driver log
 *
 * Retrieve the temporary or permanent error from the PE.
 * The error is retrieved through the dedicated RTAS call.
 */
static int pseries_eeh_get_log(struct eeh_pe *pe, int severity, char *drv_log, unsigned long len)
{
	int config_addr;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&slot_errbuf_lock, flags);
	memset(slot_errbuf, 0, eeh_error_buf_size);

	/* Figure out the PE address */
	config_addr = pe->config_addr;
	if (pe->addr)
		config_addr = pe->addr;

	ret = rtas_call(ibm_slot_error_detail, 8, 1, NULL, config_addr,
			BUID_HI(pe->phb->buid), BUID_LO(pe->phb->buid),
			virt_to_phys(drv_log), len,
			virt_to_phys(slot_errbuf), eeh_error_buf_size,
			severity);
	if (!ret)
		log_error(slot_errbuf, ERR_TYPE_RTAS_LOG, 0);
	spin_unlock_irqrestore(&slot_errbuf_lock, flags);

	return ret;
}

/**
 * pseries_eeh_configure_bridge - Configure PCI bridges in the indicated PE
 * @pe: EEH PE
 *
 * The function will be called to reconfigure the bridges included
 * in the specified PE so that the malfunctioning PE can be recovered
 * again.
 */
static int pseries_eeh_configure_bridge(struct eeh_pe *pe)
{
	int config_addr;
	int ret;
	/* Waiting 0.2s maximum before skipping configuration */
	int max_wait = 200;

	/* Figure out the PE address */
	config_addr = pe->config_addr;
	if (pe->addr)
		config_addr = pe->addr;

	while (max_wait > 0) {
		ret = rtas_call(ibm_configure_pe, 3, 1, NULL,
				config_addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid));

		if (!ret)
			return ret;

		/*
		 * If RTAS returns a delay value that's above 100ms, cut it
		 * down to 100ms in case firmware made a mistake. For more
		 * on how these delay values work see rtas_busy_delay_time
		 */
		if (ret > RTAS_EXTENDED_DELAY_MIN + 2 &&
		    ret <= RTAS_EXTENDED_DELAY_MAX)
			ret = RTAS_EXTENDED_DELAY_MIN + 2;

		max_wait -= rtas_busy_delay_time(ret);

		if (max_wait < 0)
			break;

		rtas_busy_delay(ret);
	}

	pr_warn("%s: Unable to configure bridge PHB#%x-PE#%x (%d)\n",
		__func__, pe->phb->global_number, pe->addr, ret);
	return ret;
}

/**
 * pseries_eeh_read_config - Read PCI config space
 * @pdn: PCI device node
 * @where: PCI address
 * @size: size to read
 * @val: return value
 *
 * Read config space from the specified device
 */
static int pseries_eeh_read_config(struct pci_dn *pdn, int where, int size, u32 *val)
{
	return rtas_read_config(pdn, where, size, val);
}

/**
 * pseries_eeh_write_config - Write PCI config space
 * @pdn: PCI device node
 * @where: PCI address
 * @size: size to write
 * @val: value to be written
 *
 * Write config space to the specified device
 */
static int pseries_eeh_write_config(struct pci_dn *pdn, int where, int size, u32 val)
{
	return rtas_write_config(pdn, where, size, val);
}

static int pseries_eeh_restore_config(struct pci_dn *pdn)
{
	struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
	s64 ret = 0;

	if (!edev)
		return -EEXIST;

	/*
	 * FIXME: The MPS, error routing rules and timeout settings are
	 * worth exporting by firmware in an extensible way.
	 */
	if (edev->physfn)
		ret = eeh_restore_vf_config(pdn);

	if (ret) {
		pr_warn("%s: Can't reinit PCI dev 0x%x (%lld)\n",
			__func__, edev->pe_config_addr, ret);
		return -EIO;
	}

	return ret;
}

#ifdef CONFIG_PCI_IOV
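/**
 * pseries_send_allow_unfreeze - Issue ibm,open-sriov-allow-unfreeze
 * @pdn: PCI device node of the PF
 * @vf_pe_array: VF PE numbers (big-endian) to be unfrozen
 * @cur_vfs: number of valid entries in @vf_pe_array
 *
 * Copy the VF PE numbers into the shared RTAS data buffer and ask
 * firmware to allow the indicated VF PEs to be unfrozen.
 */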
int pseries_send_allow_unfreeze(struct pci_dn *pdn,
				u16 *vf_pe_array, int cur_vfs)
{
	int rc;
	int ibm_allow_unfreeze = rtas_token("ibm,open-sriov-allow-unfreeze");
	unsigned long buid, addr;

	addr = rtas_config_addr(pdn->busno, pdn->devfn, 0);
	buid = pdn->phb->buid;
	spin_lock(&rtas_data_buf_lock);
	memcpy(rtas_data_buf, vf_pe_array, RTAS_DATA_BUF_SIZE);
	rc = rtas_call(ibm_allow_unfreeze, 5, 1, NULL,
		       addr,
		       BUID_HI(buid),
		       BUID_LO(buid),
		       rtas_data_buf, cur_vfs * sizeof(u16));
	spin_unlock(&rtas_data_buf_lock);
	if (rc)
		pr_warn("%s: Failed to allow unfreeze for PHB#%x-PE#%lx, rc=%x\n",
			__func__,
			pdn->phb->global_number, addr, rc);
	return rc;
}

static int pseries_call_allow_unfreeze(struct eeh_dev *edev)
{
	struct pci_dn *pdn, *tmp, *parent, *physfn_pdn;
	int cur_vfs = 0, rc = 0, vf_index, bus, devfn;
	u16 *vf_pe_array;

	vf_pe_array = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
	if (!vf_pe_array)
		return -ENOMEM;
	if (pci_num_vf(edev->physfn ? edev->physfn : edev->pdev)) {
		if (edev->pdev->is_physfn) {
			cur_vfs = pci_num_vf(edev->pdev);
			pdn = eeh_dev_to_pdn(edev);
			parent = pdn->parent;
			for (vf_index = 0; vf_index < cur_vfs; vf_index++)
				vf_pe_array[vf_index] =
					cpu_to_be16(pdn->pe_num_map[vf_index]);
			rc = pseries_send_allow_unfreeze(pdn, vf_pe_array,
							 cur_vfs);
			pdn->last_allow_rc = rc;
			for (vf_index = 0; vf_index < cur_vfs; vf_index++) {
				list_for_each_entry_safe(pdn, tmp,
							 &parent->child_list,
							 list) {
					bus = pci_iov_virtfn_bus(edev->pdev,
								 vf_index);
					devfn = pci_iov_virtfn_devfn(edev->pdev,
								     vf_index);
					if (pdn->busno != bus ||
					    pdn->devfn != devfn)
						continue;
					pdn->last_allow_rc = rc;
				}
			}
		} else {
			pdn = pci_get_pdn(edev->pdev);
			vf_pe_array[0] = cpu_to_be16(pdn->pe_number);
			physfn_pdn = pci_get_pdn(edev->physfn);
			rc = pseries_send_allow_unfreeze(physfn_pdn,
							 vf_pe_array, 1);
			pdn->last_allow_rc = rc;
		}
	}

	kfree(vf_pe_array);
	return rc;
}

static int pseries_notify_resume(struct pci_dn *pdn)
{
	struct eeh_dev *edev = pdn_to_eeh_dev(pdn);

	if (!edev)
		return -EEXIST;

	if (rtas_token("ibm,open-sriov-allow-unfreeze") ==
	    RTAS_UNKNOWN_SERVICE)
		return -EINVAL;

	if (edev->pdev->is_physfn || edev->pdev->is_virtfn)
		return pseries_call_allow_unfreeze(edev);

	return 0;
}
#endif

static struct eeh_ops pseries_eeh_ops = {
	.name			= "pseries",
	.init			= pseries_eeh_init,
	.probe			= pseries_eeh_probe,
	.set_option		= pseries_eeh_set_option,
	.get_pe_addr		= pseries_eeh_get_pe_addr,
	.get_state		= pseries_eeh_get_state,
	.reset			= pseries_eeh_reset,
	.get_log		= pseries_eeh_get_log,
	.configure_bridge	= pseries_eeh_configure_bridge,
	.err_inject		= NULL,
	.read_config		= pseries_eeh_read_config,
	.write_config		= pseries_eeh_write_config,
	.next_error		= NULL,
	.restore_config		= pseries_eeh_restore_config,
#ifdef CONFIG_PCI_IOV
	.notify_resume		= pseries_notify_resume
#endif
};

/**
 * eeh_pseries_init - Register platform dependent EEH operations
 *
 * EEH initialization on pseries platform. This function should be
 * called before any EEH related functions.
 */
static int __init eeh_pseries_init(void)
{
	int ret;

	ret = eeh_ops_register(&pseries_eeh_ops);
	if (!ret)
		pr_info("EEH: pSeries platform initialized\n");
	else
		pr_info("EEH: pSeries platform initialization failure (%d)\n",
			ret);

	return ret;
}
machine_early_initcall(pseries, eeh_pseries_init);