// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file implements PEs (Partitionable Endpoints) based on the
 * information from platforms. Basically, there are 3 types of PEs:
 * PHB/Bus/Device. All the PEs should be organized as a hierarchy tree.
 * The first level of the tree is associated with the existing PHBs,
 * since a particular PE is only meaningful within one PHB domain.
 *
 * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2012.
 */

#include <linux/delay.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/string.h>

#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>

static int eeh_pe_aux_size = 0;
static LIST_HEAD(eeh_phb_pe);

/**
 * eeh_set_pe_aux_size - Set PE auxiliary data size
 * @size: PE auxiliary data size
 *
 * Set the size of the auxiliary data that is allocated after each PE.
 */
void eeh_set_pe_aux_size(int size)
{
        if (size < 0)
                return;

        eeh_pe_aux_size = size;
}

/**
 * eeh_pe_alloc - Allocate PE
 * @phb: PCI controller
 * @type: PE type
 *
 * Allocate a PE instance dynamically.
 */
static struct eeh_pe *eeh_pe_alloc(struct pci_controller *phb, int type)
{
        struct eeh_pe *pe;
        size_t alloc_size;

        alloc_size = sizeof(struct eeh_pe);
        if (eeh_pe_aux_size) {
                alloc_size = ALIGN(alloc_size, cache_line_size());
                alloc_size += eeh_pe_aux_size;
        }

        /* Allocate PE */
        pe = kzalloc(alloc_size, GFP_KERNEL);
        if (!pe)
                return NULL;

        /* Initialize PE */
        pe->type = type;
        pe->phb = phb;
        INIT_LIST_HEAD(&pe->child_list);
        INIT_LIST_HEAD(&pe->edevs);

        pe->data = (void *)pe + ALIGN(sizeof(struct eeh_pe),
                                      cache_line_size());
        return pe;
}
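
/*
 * Illustrative layout of a PE allocation after the platform has called
 * eeh_set_pe_aux_size() (a sketch for clarity, not part of the original
 * code):
 *
 *      pe --------> +------------------------------+
 *                   | struct eeh_pe                |
 *                   +------------------------------+
 *                   | padding up to a cache line   |
 *      pe->data --> +------------------------------+
 *                   | eeh_pe_aux_size bytes of     |
 *                   | platform auxiliary data      |
 *                   +------------------------------+
 */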

/**
 * eeh_phb_pe_create - Create PHB PE
 * @phb: PCI controller
 *
 * The function should be called when a PHB is detected during
 * system boot or PCI hotplug in order to create the PHB PE.
 */
int eeh_phb_pe_create(struct pci_controller *phb)
{
        struct eeh_pe *pe;

        /* Allocate PHB PE */
        pe = eeh_pe_alloc(phb, EEH_PE_PHB);
        if (!pe) {
                pr_err("%s: out of memory!\n", __func__);
                return -ENOMEM;
        }

        /* Put it into the list */
        list_add_tail(&pe->child, &eeh_phb_pe);

        pr_debug("EEH: Add PE for PHB#%x\n", phb->global_number);

        return 0;
}

/**
 * eeh_wait_state - Wait for PE state
 * @pe: EEH PE
 * @max_wait: maximal period in milliseconds
 *
 * Wait for the state of the associated PE. It might take some time
 * to retrieve the PE's state.
 */
int eeh_wait_state(struct eeh_pe *pe, int max_wait)
{
        int ret;
        int mwait;

        /*
         * According to PAPR, the state of a PE might be temporarily
         * unavailable. Under that circumstance, we have to wait
         * for the time indicated by the firmware. The maximal
         * wait time is 5 minutes, which was taken from the original
         * EEH implementation. The original implementation also
         * defined the minimal wait time as 1 second.
         */
#define EEH_STATE_MIN_WAIT_TIME (1000)
#define EEH_STATE_MAX_WAIT_TIME (300 * 1000)

        while (1) {
                ret = eeh_ops->get_state(pe, &mwait);

                if (ret != EEH_STATE_UNAVAILABLE)
                        return ret;

                if (max_wait <= 0) {
                        pr_warn("%s: Timeout when getting PE's state (%d)\n",
                                __func__, max_wait);
                        return EEH_STATE_NOT_SUPPORT;
                }

                if (mwait < EEH_STATE_MIN_WAIT_TIME) {
                        pr_warn("%s: Firmware returned bad wait value %d\n",
                                __func__, mwait);
                        mwait = EEH_STATE_MIN_WAIT_TIME;
                } else if (mwait > EEH_STATE_MAX_WAIT_TIME) {
                        pr_warn("%s: Firmware returned too long wait value %d\n",
                                __func__, mwait);
                        mwait = EEH_STATE_MAX_WAIT_TIME;
                }

                msleep(min(mwait, max_wait));
                max_wait -= mwait;
        }
}

/**
 * eeh_phb_pe_get - Retrieve the PHB PE based on the given PHB
 * @phb: PCI controller
 *
 * The overall PEs form a hierarchy tree. The first layer of the
 * hierarchy tree is composed of PHB PEs. The function is used
 * to retrieve the PHB PE corresponding to the given PHB.
 */
struct eeh_pe *eeh_phb_pe_get(struct pci_controller *phb)
{
        struct eeh_pe *pe;

        list_for_each_entry(pe, &eeh_phb_pe, child) {
                /*
                 * Actually, we needn't check the type since
                 * the PE for a PHB was determined when it
                 * was created.
                 */
                if ((pe->type & EEH_PE_PHB) && pe->phb == phb)
                        return pe;
        }

        return NULL;
}

/**
 * eeh_pe_next - Retrieve the next PE in the tree
 * @pe: current PE
 * @root: root PE
 *
 * The function is used to retrieve the next PE in the
 * hierarchy PE tree.
 */
struct eeh_pe *eeh_pe_next(struct eeh_pe *pe, struct eeh_pe *root)
{
        struct list_head *next = pe->child_list.next;

        if (next == &pe->child_list) {
                while (1) {
                        if (pe == root)
                                return NULL;
                        next = pe->child.next;
                        if (next != &pe->parent->child_list)
                                break;
                        pe = pe->parent;
                }
        }

        return list_entry(next, struct eeh_pe, child);
}
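
/*
 * For illustration (a sketch, not part of the original code): given a
 * PE tree such as
 *
 *      PHB PE
 *      +-- Bus PE A
 *      |   +-- Device PE A1
 *      |   +-- Device PE A2
 *      +-- Bus PE B
 *
 * successive calls to eeh_pe_next() starting from the PHB PE return
 * A, A1, A2, B and finally NULL, i.e. the PE tree is walked in
 * pre-order (parents before their children).
 */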

/**
 * eeh_pe_traverse - Traverse PEs in the specified PHB
 * @root: root PE
 * @fn: callback
 * @flag: extra parameter to callback
 *
 * The function is used to traverse the specified PE and its
 * child PEs. The traversal terminates once the callback returns
 * something other than NULL, or there are no more PEs to traverse.
 */
void *eeh_pe_traverse(struct eeh_pe *root,
                      eeh_pe_traverse_func fn, void *flag)
{
        struct eeh_pe *pe;
        void *ret;

        eeh_for_each_pe(root, pe) {
                ret = fn(pe, flag);
                if (ret)
                        return ret;
        }

        return NULL;
}

/**
 * eeh_pe_dev_traverse - Traverse the devices from the PE
 * @root: EEH PE
 * @fn: function callback
 * @flag: extra parameter to callback
 *
 * The function is used to traverse the devices of the specified
 * PE and its child PEs.
 */
void eeh_pe_dev_traverse(struct eeh_pe *root,
                         eeh_edev_traverse_func fn, void *flag)
{
        struct eeh_pe *pe;
        struct eeh_dev *edev, *tmp;

        if (!root) {
                pr_warn("%s: Invalid PE %p\n",
                        __func__, root);
                return;
        }

        /* Traverse root PE */
        eeh_for_each_pe(root, pe)
                eeh_pe_for_each_dev(pe, edev, tmp)
                        fn(edev, flag);
}

/**
 * __eeh_pe_get - Check the PE address
 * @pe: EEH PE
 * @flag: lookup key (PE number and config address)
 *
 * A particular PE can be identified by its PE address or by its
 * traditional BDF address, which is composed of the Bus/Device/Function
 * number. The extra data referenced by @flag indicates which type of
 * address should be used.
 */
struct eeh_pe_get_flag {
        int pe_no;
        int config_addr;
};

static void *__eeh_pe_get(struct eeh_pe *pe, void *flag)
{
        struct eeh_pe_get_flag *tmp = (struct eeh_pe_get_flag *) flag;

        /* Unexpected PHB PE */
        if (pe->type & EEH_PE_PHB)
                return NULL;

        /*
         * We prefer the PE address. In most cases, we should
         * have a non-zero PE address.
         */
        if (eeh_has_flag(EEH_VALID_PE_ZERO)) {
                if (tmp->pe_no == pe->addr)
                        return pe;
        } else {
                if (tmp->pe_no &&
                    (tmp->pe_no == pe->addr))
                        return pe;
        }

        /* Try the BDF address */
        if (tmp->config_addr &&
            (tmp->config_addr == pe->config_addr))
                return pe;

        return NULL;
}

/**
 * eeh_pe_get - Search for a PE based on the given address
 * @phb: PCI controller
 * @pe_no: PE number
 * @config_addr: Config address
 *
 * Search for the corresponding PE based on the specified address,
 * which is included in the eeh device. The function is used to check
 * whether the PE associated with the given PE address has been created.
 * Note that the PE address has two formats: the traditional PE address,
 * which is composed of the PCI bus/device/function number, and the
 * unified PE address.
 */
struct eeh_pe *eeh_pe_get(struct pci_controller *phb,
                          int pe_no, int config_addr)
{
        struct eeh_pe *root = eeh_phb_pe_get(phb);
        struct eeh_pe_get_flag tmp = { pe_no, config_addr };
        struct eeh_pe *pe;

        pe = eeh_pe_traverse(root, __eeh_pe_get, &tmp);

        return pe;
}
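
/*
 * Usage sketch (illustrative only, not part of the original code): a
 * platform error handler that has decoded a PE number from firmware
 * could look the PE up with
 *
 *      pe = eeh_pe_get(phb, pe_no, 0);
 *
 * while a caller that only knows the device's BDF could pass
 *
 *      pe = eeh_pe_get(phb, 0, (busno << 8) | devfn);
 *
 * __eeh_pe_get() matches on the PE address first and only then falls
 * back to the config (BDF) address.
 */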

/**
 * eeh_pe_get_parent - Retrieve the parent PE
 * @edev: EEH device
 *
 * All the PEs in the system are organized as a hierarchy tree.
 * The function is used to retrieve the parent PE according
 * to the parent EEH device.
 */
static struct eeh_pe *eeh_pe_get_parent(struct eeh_dev *edev)
{
        struct eeh_dev *parent;
        struct pci_dn *pdn = eeh_dev_to_pdn(edev);

        /*
         * It might be the case that an indirect parent
         * EEH device already has an associated PE, while
         * the direct parent EEH device doesn't have one yet.
         */
        if (edev->physfn)
                pdn = pci_get_pdn(edev->physfn);
        else
                pdn = pdn ? pdn->parent : NULL;
        while (pdn) {
                /* We're poking out of PCI territory */
                parent = pdn_to_eeh_dev(pdn);
                if (!parent)
                        return NULL;

                if (parent->pe)
                        return parent->pe;

                pdn = pdn->parent;
        }

        return NULL;
}

/**
 * eeh_add_to_parent_pe - Add EEH device to parent PE
 * @edev: EEH device
 *
 * Add the EEH device to the parent PE. If the parent PE already
 * exists, the PE type will be changed to EEH_PE_BUS. Otherwise,
 * we have to create a new PE to hold the EEH device, and the new
 * PE will be linked to its parent PE as well.
 */
int eeh_add_to_parent_pe(struct eeh_dev *edev)
{
        struct eeh_pe *pe, *parent;
        struct pci_dn *pdn = eeh_dev_to_pdn(edev);
        int config_addr = (pdn->busno << 8) | (pdn->devfn);

        /* Check if the PE number is valid */
        if (!eeh_has_flag(EEH_VALID_PE_ZERO) && !edev->pe_config_addr) {
                eeh_edev_err(edev, "PE#0 is invalid for this PHB!\n");
                return -EINVAL;
        }

        /*
         * Search for an existing PE according to the PE address.
         * If one exists, it should be composed of the PCI bus
         * and its subordinate components.
         */
        pe = eeh_pe_get(pdn->phb, edev->pe_config_addr, config_addr);
        if (pe) {
                if (pe->type & EEH_PE_INVALID) {
                        list_add_tail(&edev->entry, &pe->edevs);
                        edev->pe = pe;
                        /*
                         * We're running here because of PCI hotplug
                         * during EEH recovery. We need to clear
                         * EEH_PE_INVALID all the way up to the top.
                         */
                        parent = pe;
                        while (parent) {
                                if (!(parent->type & EEH_PE_INVALID))
                                        break;
                                parent->type &= ~EEH_PE_INVALID;
                                parent = parent->parent;
                        }

                        eeh_edev_dbg(edev,
                                     "Added to device PE (parent: PE#%x)\n",
                                     pe->parent->addr);
                } else {
                        /* Mark the PE as a PCI bus type */
                        pe->type = EEH_PE_BUS;
                        edev->pe = pe;

                        /* Put the edev into the PE */
                        list_add_tail(&edev->entry, &pe->edevs);
                        eeh_edev_dbg(edev, "Added to bus PE\n");
                }
                return 0;
        }

        /* Create a new EEH PE */
        if (edev->physfn)
                pe = eeh_pe_alloc(pdn->phb, EEH_PE_VF);
        else
                pe = eeh_pe_alloc(pdn->phb, EEH_PE_DEVICE);
        if (!pe) {
                pr_err("%s: out of memory!\n", __func__);
                return -ENOMEM;
        }
        pe->addr = edev->pe_config_addr;
        pe->config_addr = config_addr;

        /*
         * Put the new EEH PE into the hierarchy tree. If the parent
         * can't be found, the newly created PE will be attached
         * to the PHB directly. Otherwise, we have to associate the
         * PE with its parent.
         */
        parent = eeh_pe_get_parent(edev);
        if (!parent) {
                parent = eeh_phb_pe_get(pdn->phb);
                if (!parent) {
                        pr_err("%s: No PHB PE is found (PHB Domain=%d)\n",
                               __func__, pdn->phb->global_number);
                        edev->pe = NULL;
                        kfree(pe);
                        return -EEXIST;
                }
        }
        pe->parent = parent;

        /*
         * Put the newly created PE into the child list and
         * link the EEH device accordingly.
         */
        list_add_tail(&pe->child, &parent->child_list);
        list_add_tail(&edev->entry, &pe->edevs);
        edev->pe = pe;
        eeh_edev_dbg(edev, "Added to device PE (parent: PE#%x)\n",
                     pe->parent->addr);

        return 0;
}
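
/*
 * Worked example of the config_addr encoding used above (illustrative
 * only): a device at bus 0x01, devfn 0x00 yields
 * config_addr = (0x01 << 8) | 0x00 = 0x0100, while function 1 of
 * device 2 on bus 3 (devfn 0x11) yields (0x03 << 8) | 0x11 = 0x0311.
 */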

/**
 * eeh_rmv_from_parent_pe - Remove one EEH device from the associated PE
 * @edev: EEH device
 *
 * The PE hierarchy tree might be changed when doing PCI hotplug.
 * Also, the PCI devices or buses could be removed from the system
 * during EEH recovery. So we have to call this function to remove
 * the corresponding PE accordingly if necessary.
 */
int eeh_rmv_from_parent_pe(struct eeh_dev *edev)
{
        struct eeh_pe *pe, *parent, *child;
        bool keep, recover;
        int cnt;

        pe = eeh_dev_to_pe(edev);
        if (!pe) {
                eeh_edev_dbg(edev, "No PE found for device.\n");
                return -EEXIST;
        }

        /* Remove the EEH device */
        edev->pe = NULL;
        list_del(&edev->entry);

        /*
         * Check if the parent PE includes any EEH devices.
         * If not, we should delete it. Also, we should
         * delete the parent PE if it doesn't have associated
         * child PEs and EEH devices.
         */
        while (1) {
                parent = pe->parent;

                /* PHB PEs should never be removed */
                if (pe->type & EEH_PE_PHB)
                        break;

                /*
                 * XXX: KEEP is set while resetting a PE. I don't think it's
                 * ever set without RECOVERING also being set. I could
                 * be wrong though so catch that with a WARN.
                 */
                keep = !!(pe->state & EEH_PE_KEEP);
                recover = !!(pe->state & EEH_PE_RECOVERING);
                WARN_ON(keep && !recover);

                if (!keep && !recover) {
                        if (list_empty(&pe->edevs) &&
                            list_empty(&pe->child_list)) {
                                list_del(&pe->child);
                                kfree(pe);
                        } else {
                                break;
                        }
                } else {
                        /*
                         * Mark the PE as invalid. At the end of the recovery
                         * process any invalid PEs will be garbage collected.
                         *
                         * We need to delay the free()ing of them since we can
                         * remove edev's while traversing the PE tree which
                         * might trigger the removal of a PE and we can't
                         * deal with that (yet).
                         */
                        if (list_empty(&pe->edevs)) {
                                cnt = 0;
                                list_for_each_entry(child, &pe->child_list, child) {
                                        if (!(child->type & EEH_PE_INVALID)) {
                                                cnt++;
                                                break;
                                        }
                                }

                                if (!cnt)
                                        pe->type |= EEH_PE_INVALID;
                                else
                                        break;
                        }
                }

                pe = parent;
        }

        return 0;
}

/**
 * eeh_pe_update_time_stamp - Update PE's frozen time stamp
 * @pe: EEH PE
 *
 * We keep a time stamp for each PE to track when it was first
 * frozen within the last hour. The function should be called to
 * update the time stamp on the first error of a specific PE.
 * On the other hand, errors that happened more than an hour ago
 * needn't be accounted for.
 */
void eeh_pe_update_time_stamp(struct eeh_pe *pe)
{
        time64_t tstamp;

        if (!pe)
                return;

        if (pe->freeze_count <= 0) {
                pe->freeze_count = 0;
                pe->tstamp = ktime_get_seconds();
        } else {
                tstamp = ktime_get_seconds();
                if (tstamp - pe->tstamp > 3600) {
                        pe->tstamp = tstamp;
                        pe->freeze_count = 0;
                }
        }
}
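
/*
 * For example (illustrative): if a PE first froze at t = 100s and
 * freezes again at t = 2000s, the second error is still within the
 * 3600s window, so tstamp is kept and the freeze count keeps
 * accumulating. A freeze at t = 4000s is more than an hour after
 * tstamp, so both tstamp and freeze_count are reset here.
 */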

/**
 * eeh_pe_state_mark - Mark the specified state for a PE and its child PEs
 * @root: EEH PE
 * @state: state bits to set
 *
 * An EEH error affects the current PE and its child PEs. The function
 * is used to mark the appropriate state for the affected PEs and their
 * associated devices.
 */
void eeh_pe_state_mark(struct eeh_pe *root, int state)
{
        struct eeh_pe *pe;

        eeh_for_each_pe(root, pe)
                if (!(pe->state & EEH_PE_REMOVED))
                        pe->state |= state;
}
EXPORT_SYMBOL_GPL(eeh_pe_state_mark);

/**
 * eeh_pe_mark_isolated - Mark a PE as isolated
 * @root: EEH PE
 *
 * Record that a PE has been isolated by marking the PE and its children as
 * EEH_PE_ISOLATED (and EEH_PE_CFG_BLOCKED, if required) and their PCI devices
 * as pci_channel_io_frozen.
 */
void eeh_pe_mark_isolated(struct eeh_pe *root)
{
        struct eeh_pe *pe;
        struct eeh_dev *edev;
        struct pci_dev *pdev;

        eeh_pe_state_mark(root, EEH_PE_ISOLATED);
        eeh_for_each_pe(root, pe) {
                list_for_each_entry(edev, &pe->edevs, entry) {
                        pdev = eeh_dev_to_pci_dev(edev);
                        if (pdev)
                                pdev->error_state = pci_channel_io_frozen;
                }
                /* Block PCI config access if required */
                if (pe->state & EEH_PE_CFG_RESTRICTED)
                        pe->state |= EEH_PE_CFG_BLOCKED;
        }
}
EXPORT_SYMBOL_GPL(eeh_pe_mark_isolated);

static void __eeh_pe_dev_mode_mark(struct eeh_dev *edev, void *flag)
{
        int mode = *((int *)flag);

        edev->mode |= mode;
}

/**
 * eeh_pe_dev_mode_mark - Mark a mode flag on all devices under the PE
 * @pe: EEH PE
 * @mode: EEH device mode flag
 *
 * Mark the given mode flag on all EEH devices of the PE and its
 * child PEs.
 */
void eeh_pe_dev_mode_mark(struct eeh_pe *pe, int mode)
{
        eeh_pe_dev_traverse(pe, __eeh_pe_dev_mode_mark, &mode);
}

/**
 * eeh_pe_state_clear - Clear state for a PE and its child PEs
 * @root: EEH PE
 * @state: state bits to clear
 * @include_passed: include passed-through PEs?
 *
 * The function is used to clear the indicated state bits from the
 * given PE and its child PEs. When the isolated state is cleared,
 * the PE's check count is reset as well.
 */
void eeh_pe_state_clear(struct eeh_pe *root, int state, bool include_passed)
{
        struct eeh_pe *pe;
        struct eeh_dev *edev, *tmp;
        struct pci_dev *pdev;

        eeh_for_each_pe(root, pe) {
                /* Keep the state of a permanently removed PE intact */
                if (pe->state & EEH_PE_REMOVED)
                        continue;

                if (!include_passed && eeh_pe_passed(pe))
                        continue;

                pe->state &= ~state;

                /*
                 * Special treatment when clearing the isolated state:
                 * clear the check count since the last isolation and
                 * put all affected devices back into the normal state.
                 */
                if (!(state & EEH_PE_ISOLATED))
                        continue;

                pe->check_count = 0;
                eeh_pe_for_each_dev(pe, edev, tmp) {
                        pdev = eeh_dev_to_pci_dev(edev);
                        if (!pdev)
                                continue;

                        pdev->error_state = pci_channel_io_normal;
                }

                /* Unblock PCI config access if required */
                if (pe->state & EEH_PE_CFG_RESTRICTED)
                        pe->state &= ~EEH_PE_CFG_BLOCKED;
        }
}
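
/*
 * Illustrative pairing (a sketch, not lifted from the recovery core):
 * when an error is detected the isolation state is typically set on
 * the whole subtree, and cleared again once the devices have been
 * recovered, e.g.
 *
 *      eeh_pe_mark_isolated(pe);
 *      ... reset and re-probe the devices ...
 *      eeh_pe_state_clear(pe, EEH_PE_ISOLATED, true);
 *
 * Passing include_passed == false would skip PEs that have been
 * passed through (see eeh_pe_passed()).
 */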

/*
 * Some PCI bridges (e.g. PLX bridges) have their primary/secondary
 * buses assigned explicitly by firmware, and that assignment has
 * probably been lost after a reset. So we have to delay the check
 * until the PCI-CFG registers have been restored for the parent
 * bridge.
 *
 * Don't use the normal PCI-CFG accessors, which have probably been
 * blocked on the normal path during this stage. Instead, use the
 * EEH operations, which are always permitted.
 */
static void eeh_bridge_check_link(struct eeh_dev *edev)
{
        struct pci_dn *pdn = eeh_dev_to_pdn(edev);
        int cap;
        uint32_t val;
        int timeout = 0;

        /*
         * We only check root ports and the downstream ports of
         * PCIe switches.
         */
        if (!(edev->mode & (EEH_DEV_ROOT_PORT | EEH_DEV_DS_PORT)))
                return;

        eeh_edev_dbg(edev, "Checking PCIe link...\n");

        /* Check slot status */
        cap = edev->pcie_cap;
        eeh_ops->read_config(pdn, cap + PCI_EXP_SLTSTA, 2, &val);
        if (!(val & PCI_EXP_SLTSTA_PDS)) {
                eeh_edev_dbg(edev, "No card in the slot (0x%04x) !\n", val);
                return;
        }

        /* Check power status if we have the capability */
        eeh_ops->read_config(pdn, cap + PCI_EXP_SLTCAP, 2, &val);
        if (val & PCI_EXP_SLTCAP_PCP) {
                eeh_ops->read_config(pdn, cap + PCI_EXP_SLTCTL, 2, &val);
                if (val & PCI_EXP_SLTCTL_PCC) {
                        eeh_edev_dbg(edev, "In power-off state, power it on ...\n");
                        val &= ~(PCI_EXP_SLTCTL_PCC | PCI_EXP_SLTCTL_PIC);
                        val |= (0x0100 & PCI_EXP_SLTCTL_PIC);
                        eeh_ops->write_config(pdn, cap + PCI_EXP_SLTCTL, 2, val);
                        msleep(2 * 1000);
                }
        }

        /* Enable the link */
        eeh_ops->read_config(pdn, cap + PCI_EXP_LNKCTL, 2, &val);
        val &= ~PCI_EXP_LNKCTL_LD;
        eeh_ops->write_config(pdn, cap + PCI_EXP_LNKCTL, 2, val);

        /* Check the link */
        eeh_ops->read_config(pdn, cap + PCI_EXP_LNKCAP, 4, &val);
        if (!(val & PCI_EXP_LNKCAP_DLLLARC)) {
                eeh_edev_dbg(edev, "No link reporting capability (0x%08x) \n", val);
                msleep(1000);
                return;
        }

        /* Wait for the link to come up, with a 5s timeout */
        timeout = 0;
        while (timeout < 5000) {
                msleep(20);
                timeout += 20;

                eeh_ops->read_config(pdn, cap + PCI_EXP_LNKSTA, 2, &val);
                if (val & PCI_EXP_LNKSTA_DLLLA)
                        break;
        }

        if (val & PCI_EXP_LNKSTA_DLLLA)
                eeh_edev_dbg(edev, "Link up (%s)\n",
                             (val & PCI_EXP_LNKSTA_CLS_2_5GB) ? "2.5GB" : "5GB");
        else
                eeh_edev_dbg(edev, "Link not ready (0x%04x)\n", val);
}

#define BYTE_SWAP(OFF)  (8*((OFF)/4)+3-(OFF))
#define SAVED_BYTE(OFF) (((u8 *)(edev->config_space))[BYTE_SWAP(OFF)])

static void eeh_restore_bridge_bars(struct eeh_dev *edev)
{
        struct pci_dn *pdn = eeh_dev_to_pdn(edev);
        int i;

        /*
         * Device BARs: 0x10 - 0x18
         * Bus numbers and windows: 0x18 - 0x30
         */
        for (i = 4; i < 13; i++)
                eeh_ops->write_config(pdn, i*4, 4, edev->config_space[i]);
        /* Rom: 0x38 */
        eeh_ops->write_config(pdn, 14*4, 4, edev->config_space[14]);

        /* Cache line & Latency timer: 0xC 0xD */
        eeh_ops->write_config(pdn, PCI_CACHE_LINE_SIZE, 1,
                              SAVED_BYTE(PCI_CACHE_LINE_SIZE));
        eeh_ops->write_config(pdn, PCI_LATENCY_TIMER, 1,
                              SAVED_BYTE(PCI_LATENCY_TIMER));
        /* Max latency, min grant, interrupt pin and line: 0x3C */
        eeh_ops->write_config(pdn, 15*4, 4, edev->config_space[15]);

        /* PCI Command: 0x4 */
        eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1] |
                              PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);

        /* Check that the PCIe link is ready */
        eeh_bridge_check_link(edev);
}

static void eeh_restore_device_bars(struct eeh_dev *edev)
{
        struct pci_dn *pdn = eeh_dev_to_pdn(edev);
        int i;
        u32 cmd;

        for (i = 4; i < 10; i++)
                eeh_ops->write_config(pdn, i*4, 4, edev->config_space[i]);
        /* 12 == Expansion ROM Address */
        eeh_ops->write_config(pdn, 12*4, 4, edev->config_space[12]);

        eeh_ops->write_config(pdn, PCI_CACHE_LINE_SIZE, 1,
                              SAVED_BYTE(PCI_CACHE_LINE_SIZE));
        eeh_ops->write_config(pdn, PCI_LATENCY_TIMER, 1,
                              SAVED_BYTE(PCI_LATENCY_TIMER));

        /* Max latency, min grant, interrupt pin and line */
        eeh_ops->write_config(pdn, 15*4, 4, edev->config_space[15]);

        /*
         * Restore the PERR & SERR bits; some devices require it.
         * Don't touch the other command bits.
         */
        eeh_ops->read_config(pdn, PCI_COMMAND, 4, &cmd);
        if (edev->config_space[1] & PCI_COMMAND_PARITY)
                cmd |= PCI_COMMAND_PARITY;
        else
                cmd &= ~PCI_COMMAND_PARITY;
        if (edev->config_space[1] & PCI_COMMAND_SERR)
                cmd |= PCI_COMMAND_SERR;
        else
                cmd &= ~PCI_COMMAND_SERR;
        eeh_ops->write_config(pdn, PCI_COMMAND, 4, cmd);
}

/**
 * eeh_restore_one_device_bars - Restore the Base Address Registers for one device
 * @edev: EEH device
 * @flag: Unused
 *
 * Loads the PCI configuration space base address registers,
 * the expansion ROM base address, the latency timer, etc.
 * from the saved values in the device node.
 */
static void eeh_restore_one_device_bars(struct eeh_dev *edev, void *flag)
{
        struct pci_dn *pdn = eeh_dev_to_pdn(edev);

        /* Do the special restore for bridges */
        if (edev->mode & EEH_DEV_BRIDGE)
                eeh_restore_bridge_bars(edev);
        else
                eeh_restore_device_bars(edev);

        if (eeh_ops->restore_config && pdn)
                eeh_ops->restore_config(pdn);
}

/**
 * eeh_pe_restore_bars - Restore the PCI config space info
 * @pe: EEH PE
 *
 * This routine performs a recursive walk to the children
 * of this device as well.
 */
void eeh_pe_restore_bars(struct eeh_pe *pe)
{
        /*
         * We needn't take the EEH lock since eeh_pe_dev_traverse()
         * will take it.
         */
        eeh_pe_dev_traverse(pe, eeh_restore_one_device_bars, NULL);
}

/**
 * eeh_pe_loc_get - Retrieve the location code bound to the given PE
 * @pe: EEH PE
 *
 * Retrieve the location code of the given PE. If the primary bus of
 * the PE is the root bus, the location code is taken from the PHB's
 * device tree node or the root port. Otherwise, the device tree node
 * of the upstream bridge of the PE's primary bus is checked for the
 * location code.
 */
const char *eeh_pe_loc_get(struct eeh_pe *pe)
{
        struct pci_bus *bus = eeh_pe_bus_get(pe);
        struct device_node *dn;
        const char *loc = NULL;

        while (bus) {
                dn = pci_bus_to_OF_node(bus);
                if (!dn) {
                        bus = bus->parent;
                        continue;
                }

                if (pci_is_root_bus(bus))
                        loc = of_get_property(dn, "ibm,io-base-loc-code", NULL);
                else
                        loc = of_get_property(dn, "ibm,slot-location-code",
                                              NULL);

                if (loc)
                        return loc;

                bus = bus->parent;
        }

        return "N/A";
}

/**
 * eeh_pe_bus_get - Retrieve the PCI bus according to the given PE
 * @pe: EEH PE
 *
 * Retrieve the PCI bus according to the given PE. Basically,
 * there are 3 types of PEs: PHB/Bus/Device. For a PHB PE, the
 * primary PCI bus is retrieved. For a BUS PE, the parent bus is
 * returned. However, there is no associated PCI bus for a DEVICE PE.
 */
struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe)
{
        struct eeh_dev *edev;
        struct pci_dev *pdev;

        if (pe->type & EEH_PE_PHB)
                return pe->phb->bus;

        /* The primary bus might have been cached during probe time */
        if (pe->state & EEH_PE_PRI_BUS)
                return pe->bus;

        /* Retrieve the parent PCI bus of the first (top) PCI device */
        edev = list_first_entry_or_null(&pe->edevs, struct eeh_dev, entry);
        pdev = eeh_dev_to_pci_dev(edev);
        if (pdev)
                return pdev->bus;

        return NULL;
}