/*
 * This file implements EEH PE (Partitionable Endpoint) support based on
 * the information from the platforms. Basically, there are 3 types of PEs:
 * PHB/Bus/Device. All PEs are organized as a hierarchy tree. The first
 * level of the tree is associated with the existing PHBs, since a
 * particular PE is only meaningful within one PHB domain.
 *
 * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2012.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/delay.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/string.h>

#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>

static int eeh_pe_aux_size = 0;
static LIST_HEAD(eeh_phb_pe);

/**
 * eeh_set_pe_aux_size - Set PE auxiliary data size
 * @size: PE auxiliary data size
 *
 * Set the size of the auxiliary (platform private) data that is
 * allocated together with each PE.
 */
void eeh_set_pe_aux_size(int size)
{
	if (size < 0)
		return;

	eeh_pe_aux_size = size;
}

/**
 * eeh_pe_alloc - Allocate PE
 * @phb: PCI controller
 * @type: PE type
 *
 * Allocate a PE instance dynamically.
 */
static struct eeh_pe *eeh_pe_alloc(struct pci_controller *phb, int type)
{
	struct eeh_pe *pe;
	size_t alloc_size;

	alloc_size = sizeof(struct eeh_pe);
	if (eeh_pe_aux_size) {
		alloc_size = ALIGN(alloc_size, cache_line_size());
		alloc_size += eeh_pe_aux_size;
	}

	/* Allocate PE */
	pe = kzalloc(alloc_size, GFP_KERNEL);
	if (!pe)
		return NULL;

	/* Initialize PE */
	pe->type = type;
	pe->phb = phb;
	INIT_LIST_HEAD(&pe->child_list);
	INIT_LIST_HEAD(&pe->edevs);

	pe->data = (void *)pe + ALIGN(sizeof(struct eeh_pe),
				      cache_line_size());
	return pe;
}
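
/*
 * Illustrative sketch only (not taken from any platform backend): a
 * platform that wants per-PE private data can reserve it before the
 * first PE is allocated and later reach it through pe->data, e.g.
 *
 *	struct my_plat_pe_priv {		// hypothetical structure
 *		u64 last_fence_reason;
 *	};
 *
 *	eeh_set_pe_aux_size(sizeof(struct my_plat_pe_priv));
 *	...
 *	struct my_plat_pe_priv *priv = pe->data;
 *
 * The auxiliary area starts at the first cache-line-aligned offset
 * after struct eeh_pe, matching the layout set up by eeh_pe_alloc()
 * above.
 */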

/**
 * eeh_phb_pe_create - Create PHB PE
 * @phb: PCI controller
 *
 * The function should be called while the PHB is detected during
 * system boot or PCI hotplug in order to create the PHB PE.
 */
int eeh_phb_pe_create(struct pci_controller *phb)
{
	struct eeh_pe *pe;

	/* Allocate PHB PE */
	pe = eeh_pe_alloc(phb, EEH_PE_PHB);
	if (!pe) {
		pr_err("%s: out of memory!\n", __func__);
		return -ENOMEM;
	}

	/* Put it into the list */
	list_add_tail(&pe->child, &eeh_phb_pe);

	pr_debug("EEH: Add PE for PHB#%x\n", phb->global_number);

	return 0;
}

/**
 * eeh_wait_state - Wait for PE state
 * @pe: EEH PE
 * @max_wait: maximal period in milliseconds
 *
 * Wait for the state of the associated PE. It might take some time
 * to retrieve the PE's state.
 */
int eeh_wait_state(struct eeh_pe *pe, int max_wait)
{
	int ret;
	int mwait;

	/*
	 * According to PAPR, the state of a PE might be temporarily
	 * unavailable. In that case, we have to wait for the time
	 * indicated by the firmware. The maximal wait time is 5
	 * minutes and the minimal wait time is 1 second, both taken
	 * from the original EEH implementation.
	 */
#define EEH_STATE_MIN_WAIT_TIME	(1000)
#define EEH_STATE_MAX_WAIT_TIME	(300 * 1000)

	while (1) {
		ret = eeh_ops->get_state(pe, &mwait);

		if (ret != EEH_STATE_UNAVAILABLE)
			return ret;

		if (max_wait <= 0) {
			pr_warn("%s: Timeout when getting PE's state (%d)\n",
				__func__, max_wait);
			return EEH_STATE_NOT_SUPPORT;
		}

		if (mwait < EEH_STATE_MIN_WAIT_TIME) {
			pr_warn("%s: Firmware returned bad wait value %d\n",
				__func__, mwait);
			mwait = EEH_STATE_MIN_WAIT_TIME;
		} else if (mwait > EEH_STATE_MAX_WAIT_TIME) {
			pr_warn("%s: Firmware returned too long wait value %d\n",
				__func__, mwait);
			mwait = EEH_STATE_MAX_WAIT_TIME;
		}

		msleep(min(mwait, max_wait));
		max_wait -= mwait;
	}
}

/**
 * eeh_phb_pe_get - Retrieve PHB PE based on the given PHB
 * @phb: PCI controller
 *
 * All PEs form a hierarchy tree whose first layer is composed of
 * PHB PEs. The function is used to retrieve the PHB PE that
 * corresponds to the given PHB.
 */
struct eeh_pe *eeh_phb_pe_get(struct pci_controller *phb)
{
	struct eeh_pe *pe;

	list_for_each_entry(pe, &eeh_phb_pe, child) {
		/*
		 * Actually, we needn't check the type since
		 * the PE for a PHB was marked as such when it
		 * was created.
		 */
		if ((pe->type & EEH_PE_PHB) && pe->phb == phb)
			return pe;
	}

	return NULL;
}

/**
 * eeh_pe_next - Retrieve the next PE in the tree
 * @pe: current PE
 * @root: root PE
 *
 * The function is used to retrieve the next PE, in pre-order, of
 * the hierarchy tree rooted at @root.
 */
struct eeh_pe *eeh_pe_next(struct eeh_pe *pe, struct eeh_pe *root)
{
	struct list_head *next = pe->child_list.next;

	if (next == &pe->child_list) {
		while (1) {
			if (pe == root)
				return NULL;
			next = pe->child.next;
			if (next != &pe->parent->child_list)
				break;
			pe = pe->parent;
		}
	}

	return list_entry(next, struct eeh_pe, child);
}
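
/*
 * Illustrative example of the walk order (the tree below is made up):
 * for a hierarchy
 *
 *	PHB PE
 *	+-- Bus PE A
 *	|     +-- Device PE A1
 *	|     +-- Device PE A2
 *	+-- Device PE B
 *
 * repeatedly calling eeh_pe_next() starting from the PHB PE visits the
 * PEs in pre-order: PHB, A, A1, A2, B. The eeh_for_each_pe() iterator
 * used throughout this file is built on top of this walk.
 */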

/**
 * eeh_pe_traverse - Traverse the PE and its children
 * @root: root PE
 * @fn: callback
 * @flag: extra parameter to callback
 *
 * The function is used to traverse the specified PE and its
 * child PEs. The traversal terminates once the callback returns
 * something other than NULL, or when there are no more PEs to
 * visit.
 */
void *eeh_pe_traverse(struct eeh_pe *root,
		      eeh_pe_traverse_func fn, void *flag)
{
	struct eeh_pe *pe;
	void *ret;

	eeh_for_each_pe(root, pe) {
		ret = fn(pe, flag);
		if (ret)
			return ret;
	}

	return NULL;
}

/**
 * eeh_pe_dev_traverse - Traverse the devices from the PE
 * @root: EEH PE
 * @fn: function callback
 * @flag: extra parameter to callback
 *
 * The function is used to traverse the devices of the specified
 * PE and its child PEs.
 */
void *eeh_pe_dev_traverse(struct eeh_pe *root,
			  eeh_edev_traverse_func fn, void *flag)
{
	struct eeh_pe *pe;
	struct eeh_dev *edev, *tmp;
	void *ret;

	if (!root) {
		pr_warn("%s: Invalid PE %p\n",
			__func__, root);
		return NULL;
	}

	/* Traverse root PE */
	eeh_for_each_pe(root, pe) {
		eeh_pe_for_each_dev(pe, edev, tmp) {
			ret = fn(edev, flag);
			if (ret)
				return ret;
		}
	}

	return NULL;
}
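
/*
 * Illustrative only (the callback below is hypothetical and not part of
 * this file): a typical eeh_pe_traverse() user passes a small callback
 * plus a flag pointer, and the callback returns non-NULL to stop the
 * walk early.
 *
 *	static void *__count_isolated_pe(struct eeh_pe *pe, void *flag)
 *	{
 *		int *cnt = flag;
 *
 *		if (pe->state & EEH_PE_ISOLATED)
 *			(*cnt)++;
 *
 *		return NULL;	// NULL means keep walking
 *	}
 *
 *	int cnt = 0;
 *	eeh_pe_traverse(root, __count_isolated_pe, &cnt);
 *
 * eeh_pe_dev_traverse() works the same way, except that the callback
 * receives each struct eeh_dev attached to the PEs instead of the PEs
 * themselves.
 */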

/**
 * __eeh_pe_get - Check the PE address
 * @pe: EEH PE
 * @flag: lookup descriptor (PE number and config address)
 *
 * A particular PE can be identified by its PE address or by the
 * traditional BDF address, which is composed of the Bus/Device/Function
 * number. The extra data referred to by @flag carries both forms and
 * indicates which type of address should be used.
 */
struct eeh_pe_get_flag {
	int pe_no;
	int config_addr;
};

static void *__eeh_pe_get(struct eeh_pe *pe, void *flag)
{
	struct eeh_pe_get_flag *tmp = (struct eeh_pe_get_flag *) flag;

	/* Unexpected PHB PE */
	if (pe->type & EEH_PE_PHB)
		return NULL;

	/*
	 * We prefer the PE address. In most cases, we should
	 * have a non-zero PE address.
	 */
	if (eeh_has_flag(EEH_VALID_PE_ZERO)) {
		if (tmp->pe_no == pe->addr)
			return pe;
	} else {
		if (tmp->pe_no &&
		    (tmp->pe_no == pe->addr))
			return pe;
	}

	/* Try BDF address */
	if (tmp->config_addr &&
	    (tmp->config_addr == pe->config_addr))
		return pe;

	return NULL;
}

/**
 * eeh_pe_get - Search PE based on the given address
 * @phb: PCI controller
 * @pe_no: PE number
 * @config_addr: Config address
 *
 * Search for the corresponding PE based on the specified address,
 * which is included in the EEH device. The function is used to check
 * whether the PE associated with the given PE address has been created.
 * Note that the PE address has two formats: the traditional address
 * composed of the PCI bus/device/function number, or the unified PE
 * address.
 */
struct eeh_pe *eeh_pe_get(struct pci_controller *phb,
			  int pe_no, int config_addr)
{
	struct eeh_pe *root = eeh_phb_pe_get(phb);
	struct eeh_pe_get_flag tmp = { pe_no, config_addr };
	struct eeh_pe *pe;

	pe = eeh_pe_traverse(root, __eeh_pe_get, &tmp);

	return pe;
}

/**
 * eeh_pe_get_parent - Retrieve the parent PE
 * @edev: EEH device
 *
 * All PEs in the system are organized as a hierarchy tree. The
 * function is used to retrieve the parent PE according to the
 * parent EEH device.
 */
static struct eeh_pe *eeh_pe_get_parent(struct eeh_dev *edev)
{
	struct eeh_dev *parent;
	struct pci_dn *pdn = eeh_dev_to_pdn(edev);

	/*
	 * It can happen that an indirect parent EEH device already
	 * has an associated PE while the direct parent EEH device
	 * doesn't have one yet.
	 */
	if (edev->physfn)
		pdn = pci_get_pdn(edev->physfn);
	else
		pdn = pdn ? pdn->parent : NULL;
	while (pdn) {
		/* We're poking out of PCI territory */
		parent = pdn_to_eeh_dev(pdn);
		if (!parent)
			return NULL;

		if (parent->pe)
			return parent->pe;

		pdn = pdn->parent;
	}

	return NULL;
}

/**
 * eeh_add_to_parent_pe - Add EEH device to parent PE
 * @edev: EEH device
 *
 * Add the EEH device to the parent PE. If the parent PE already
 * exists, its type is changed to EEH_PE_BUS. Otherwise, a new PE
 * is created to hold the EEH device and is linked to its parent
 * PE as well.
 */
int eeh_add_to_parent_pe(struct eeh_dev *edev)
{
	struct eeh_pe *pe, *parent;
	struct pci_dn *pdn = eeh_dev_to_pdn(edev);
	int config_addr = (pdn->busno << 8) | (pdn->devfn);

	/* Check if the PE number is valid */
	if (!eeh_has_flag(EEH_VALID_PE_ZERO) && !edev->pe_config_addr) {
		pr_err("%s: Invalid PE#0 for edev 0x%x on PHB#%x\n",
		       __func__, config_addr, pdn->phb->global_number);
		return -EINVAL;
	}

	/*
	 * Check whether the PE already exists according to the PE
	 * address. If it does, the PE is composed of a PCI bus and
	 * its subordinate components.
	 */
	pe = eeh_pe_get(pdn->phb, edev->pe_config_addr, config_addr);
	if (pe && !(pe->type & EEH_PE_INVALID)) {
		/* Mark the PE as a PCI bus type */
		pe->type = EEH_PE_BUS;
		edev->pe = pe;

		/* Put the edev into the PE */
		list_add_tail(&edev->entry, &pe->edevs);
		pr_debug("EEH: Add %04x:%02x:%02x.%01x to Bus PE#%x\n",
			 pdn->phb->global_number,
			 pdn->busno,
			 PCI_SLOT(pdn->devfn),
			 PCI_FUNC(pdn->devfn),
			 pe->addr);
		return 0;
	} else if (pe && (pe->type & EEH_PE_INVALID)) {
		list_add_tail(&edev->entry, &pe->edevs);
		edev->pe = pe;
		/*
		 * We get here because of PCI hotplug triggered by
		 * EEH recovery. We need to clear EEH_PE_INVALID all
		 * the way up to the top.
		 */
		parent = pe;
		while (parent) {
			if (!(parent->type & EEH_PE_INVALID))
				break;
			parent->type &= ~EEH_PE_INVALID;
			parent = parent->parent;
		}

		pr_debug("EEH: Add %04x:%02x:%02x.%01x to Device PE#%x, Parent PE#%x\n",
			 pdn->phb->global_number,
			 pdn->busno,
			 PCI_SLOT(pdn->devfn),
			 PCI_FUNC(pdn->devfn),
			 pe->addr, pe->parent->addr);
		return 0;
	}

	/* Create a new EEH PE */
	if (edev->physfn)
		pe = eeh_pe_alloc(pdn->phb, EEH_PE_VF);
	else
		pe = eeh_pe_alloc(pdn->phb, EEH_PE_DEVICE);
	if (!pe) {
		pr_err("%s: out of memory!\n", __func__);
		return -ENOMEM;
	}
	pe->addr = edev->pe_config_addr;
	pe->config_addr = config_addr;

	/*
	 * Put the new EEH PE into the hierarchy tree. If the parent
	 * can't be found, the newly created PE will be attached to
	 * the PHB directly. Otherwise, we have to associate the PE
	 * with its parent.
	 */
	parent = eeh_pe_get_parent(edev);
	if (!parent) {
		parent = eeh_phb_pe_get(pdn->phb);
		if (!parent) {
			pr_err("%s: No PHB PE is found (PHB Domain=%d)\n",
			       __func__, pdn->phb->global_number);
			edev->pe = NULL;
			kfree(pe);
			return -EEXIST;
		}
	}
	pe->parent = parent;

	/*
	 * Put the newly created PE into the child list and
	 * link the EEH device accordingly.
	 */
	list_add_tail(&pe->child, &parent->child_list);
	list_add_tail(&edev->entry, &pe->edevs);
	edev->pe = pe;
	pr_debug("EEH: Add %04x:%02x:%02x.%01x to Device PE#%x, Parent PE#%x\n",
		 pdn->phb->global_number,
		 pdn->busno,
		 PCI_SLOT(pdn->devfn),
		 PCI_FUNC(pdn->devfn),
		 pe->addr, pe->parent->addr);

	return 0;
}
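
/*
 * Rough sketch of the hierarchy built above (the exact shape depends on
 * the platform and on the PE addresses reported by firmware): the PHB PE
 * created at boot sits on top, a PE shared by several devices behind the
 * same bus is promoted to EEH_PE_BUS, and leaf devices get EEH_PE_DEVICE
 * (or EEH_PE_VF for SR-IOV virtual functions):
 *
 *	PHB PE
 *	+-- Bus PE (several edevs share one PE address)
 *	+-- Device PE (one edev)
 *	+-- VF PE (one edev, parented like a device PE)
 */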

/**
 * eeh_rmv_from_parent_pe - Remove one EEH device from the associated PE
 * @edev: EEH device
 *
 * The PE hierarchy tree might be changed when doing PCI hotplug.
 * Also, PCI devices or buses could be removed from the system
 * during EEH recovery. So we have to call this function to remove
 * the corresponding PE if necessary.
 */
int eeh_rmv_from_parent_pe(struct eeh_dev *edev)
{
	struct eeh_pe *pe, *parent, *child;
	int cnt;
	struct pci_dn *pdn = eeh_dev_to_pdn(edev);

	pe = eeh_dev_to_pe(edev);
	if (!pe) {
		pr_debug("%s: No PE found for device %04x:%02x:%02x.%01x\n",
			 __func__, pdn->phb->global_number,
			 pdn->busno,
			 PCI_SLOT(pdn->devfn),
			 PCI_FUNC(pdn->devfn));
		return -EEXIST;
	}

	/* Remove the EEH device */
	edev->pe = NULL;
	list_del(&edev->entry);

	/*
	 * Check if the parent PE includes any EEH devices or child
	 * PEs. If it has neither, delete it; if it has to be kept
	 * (EEH_PE_KEEP), only mark it as EEH_PE_INVALID.
	 */
	while (1) {
		parent = pe->parent;
		if (pe->type & EEH_PE_PHB)
			break;

		if (!(pe->state & EEH_PE_KEEP)) {
			if (list_empty(&pe->edevs) &&
			    list_empty(&pe->child_list)) {
				list_del(&pe->child);
				kfree(pe);
			} else {
				break;
			}
		} else {
			if (list_empty(&pe->edevs)) {
				cnt = 0;
				list_for_each_entry(child, &pe->child_list, child) {
					if (!(child->type & EEH_PE_INVALID)) {
						cnt++;
						break;
					}
				}

				if (!cnt)
					pe->type |= EEH_PE_INVALID;
				else
					break;
			}
		}

		pe = parent;
	}

	return 0;
}

/**
 * eeh_pe_update_time_stamp - Update PE's frozen time stamp
 * @pe: EEH PE
 *
 * Each PE carries a time stamp tracking when it last started to
 * accumulate freezes. The function should be called on the first
 * error of a specific PE: if the previous freeze window started
 * more than an hour ago, the time stamp and freeze count are
 * reset, so errors older than one hour are not accounted for.
 */
void eeh_pe_update_time_stamp(struct eeh_pe *pe)
{
	time64_t tstamp;

	if (!pe)
		return;

	if (pe->freeze_count <= 0) {
		pe->freeze_count = 0;
		pe->tstamp = ktime_get_seconds();
	} else {
		tstamp = ktime_get_seconds();
		if (tstamp - pe->tstamp > 3600) {
			pe->tstamp = tstamp;
			pe->freeze_count = 0;
		}
	}
}

/**
 * eeh_pe_state_mark - Mark the specified state for a PE and its children
 * @root: EEH PE
 * @state: state bits to be set
 *
 * An EEH error affects the current PE and its child PEs. The function
 * is used to mark the appropriate state for the affected PEs and their
 * associated devices.
 */
void eeh_pe_state_mark(struct eeh_pe *root, int state)
{
	struct eeh_pe *pe;

	eeh_for_each_pe(root, pe)
		if (!(pe->state & EEH_PE_REMOVED))
			pe->state |= state;
}
EXPORT_SYMBOL_GPL(eeh_pe_state_mark);

/**
 * eeh_pe_mark_isolated - Mark a PE and its children as isolated
 * @root: EEH PE
 *
 * Record that a PE has been isolated by marking the PE and its children as
 * EEH_PE_ISOLATED (and EEH_PE_CFG_BLOCKED, if required) and their PCI devices
 * as pci_channel_io_frozen.
 */
void eeh_pe_mark_isolated(struct eeh_pe *root)
{
	struct eeh_pe *pe;
	struct eeh_dev *edev;
	struct pci_dev *pdev;

	eeh_pe_state_mark(root, EEH_PE_ISOLATED);
	eeh_for_each_pe(root, pe) {
		list_for_each_entry(edev, &pe->edevs, entry) {
			pdev = eeh_dev_to_pci_dev(edev);
			if (pdev)
				pdev->error_state = pci_channel_io_frozen;
		}
		/* Block PCI config access if required */
		if (pe->state & EEH_PE_CFG_RESTRICTED)
			pe->state |= EEH_PE_CFG_BLOCKED;
	}
}
EXPORT_SYMBOL_GPL(eeh_pe_mark_isolated);

static void *__eeh_pe_dev_mode_mark(struct eeh_dev *edev, void *flag)
{
	int mode = *((int *)flag);

	edev->mode |= mode;

	return NULL;
}

/**
 * eeh_pe_dev_mode_mark - Mark the mode for all devices under the PE
 * @pe: EEH PE
 * @mode: EEH device mode to be set
 *
 * Mark the specified mode for all devices of the PE and its children.
 */
void eeh_pe_dev_mode_mark(struct eeh_pe *pe, int mode)
{
	eeh_pe_dev_traverse(pe, __eeh_pe_dev_mode_mark, &mode);
}

/**
 * __eeh_pe_state_clear - Clear state for the PE
 * @pe: EEH PE
 * @flag: pointer to the state to be cleared
 *
 * The function is used to clear the indicated state from the given
 * PE. When the isolated state is cleared, the check count of the PE
 * is reset as well.
 */
static void *__eeh_pe_state_clear(struct eeh_pe *pe, void *flag)
{
	int state = *((int *)flag);
	struct eeh_dev *edev, *tmp;
	struct pci_dev *pdev;

	/* Keep the state of a permanently removed PE intact */
	if (pe->state & EEH_PE_REMOVED)
		return NULL;

	pe->state &= ~state;

	/*
	 * Special treatment for clearing the isolated state. Clear
	 * the check count since the last isolation and put all
	 * affected devices back to the normal state.
	 */
	if (!(state & EEH_PE_ISOLATED))
		return NULL;

	pe->check_count = 0;
	eeh_pe_for_each_dev(pe, edev, tmp) {
		pdev = eeh_dev_to_pci_dev(edev);
		if (!pdev)
			continue;

		pdev->error_state = pci_channel_io_normal;
	}

	/* Unblock PCI config access if required */
	if (pe->state & EEH_PE_CFG_RESTRICTED)
		pe->state &= ~EEH_PE_CFG_BLOCKED;

	return NULL;
}

/**
 * eeh_pe_state_clear - Clear state for the PE and its children
 * @pe: PE
 * @state: state to be cleared
 *
 * Once the PE and its children have been recovered from an error,
 * we need to clear the corresponding error state. The function is
 * used for that purpose.
 */
void eeh_pe_state_clear(struct eeh_pe *pe, int state)
{
	eeh_pe_traverse(pe, __eeh_pe_state_clear, &state);
}
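
/*
 * Illustrative pairing (the recovery code itself lives outside this
 * file): state bits set with eeh_pe_state_mark() are normally cleared
 * again with eeh_pe_state_clear() on the same (sub)tree once recovery
 * completes, e.g.
 *
 *	eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
 *	... recover the PE ...
 *	eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
 */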

/*
 * Some PCI bridges (e.g. PLX bridges) have their primary/secondary
 * bus numbers assigned explicitly by firmware, and we have probably
 * lost them after a reset. So we have to delay the check until the
 * PCI-CFG registers have been restored for the parent bridge.
 *
 * Don't use the normal PCI-CFG accessors, which have probably been
 * blocked on the normal path at this stage. Instead, use the EEH
 * operations, which are always permitted.
 */
static void eeh_bridge_check_link(struct eeh_dev *edev)
{
	struct pci_dn *pdn = eeh_dev_to_pdn(edev);
	int cap;
	uint32_t val;
	int timeout = 0;

	/*
	 * We only check root ports and downstream ports of
	 * PCIe switches.
	 */
	if (!(edev->mode & (EEH_DEV_ROOT_PORT | EEH_DEV_DS_PORT)))
		return;

	pr_debug("%s: Check PCIe link for %04x:%02x:%02x.%01x ...\n",
		 __func__, pdn->phb->global_number,
		 pdn->busno,
		 PCI_SLOT(pdn->devfn),
		 PCI_FUNC(pdn->devfn));

	/* Check slot status */
	cap = edev->pcie_cap;
	eeh_ops->read_config(pdn, cap + PCI_EXP_SLTSTA, 2, &val);
	if (!(val & PCI_EXP_SLTSTA_PDS)) {
		pr_debug("  No card in the slot (0x%04x) !\n", val);
		return;
	}

	/* Check power status if we have the capability */
	eeh_ops->read_config(pdn, cap + PCI_EXP_SLTCAP, 2, &val);
	if (val & PCI_EXP_SLTCAP_PCP) {
		eeh_ops->read_config(pdn, cap + PCI_EXP_SLTCTL, 2, &val);
		if (val & PCI_EXP_SLTCTL_PCC) {
			pr_debug("  In power-off state, power it on ...\n");
			val &= ~(PCI_EXP_SLTCTL_PCC | PCI_EXP_SLTCTL_PIC);
			val |= (0x0100 & PCI_EXP_SLTCTL_PIC);
			eeh_ops->write_config(pdn, cap + PCI_EXP_SLTCTL, 2, val);
			msleep(2 * 1000);
		}
	}

	/* Enable link */
	eeh_ops->read_config(pdn, cap + PCI_EXP_LNKCTL, 2, &val);
	val &= ~PCI_EXP_LNKCTL_LD;
	eeh_ops->write_config(pdn, cap + PCI_EXP_LNKCTL, 2, val);

	/* Check link */
	eeh_ops->read_config(pdn, cap + PCI_EXP_LNKCAP, 4, &val);
	if (!(val & PCI_EXP_LNKCAP_DLLLARC)) {
		pr_debug("  No link reporting capability (0x%08x) \n", val);
		msleep(1000);
		return;
	}

	/* Wait for the link to come up, with a 5s timeout */
	timeout = 0;
	while (timeout < 5000) {
		msleep(20);
		timeout += 20;

		eeh_ops->read_config(pdn, cap + PCI_EXP_LNKSTA, 2, &val);
		if (val & PCI_EXP_LNKSTA_DLLLA)
			break;
	}

	if (val & PCI_EXP_LNKSTA_DLLLA)
		pr_debug("  Link up (%s)\n",
			 (val & PCI_EXP_LNKSTA_CLS_2_5GB) ? "2.5GB" : "5GB");
	else
		pr_debug("  Link not ready (0x%04x)\n", val);
}

#define BYTE_SWAP(OFF)	(8*((OFF)/4)+3-(OFF))
#define SAVED_BYTE(OFF)	(((u8 *)(edev->config_space))[BYTE_SWAP(OFF)])

static void eeh_restore_bridge_bars(struct eeh_dev *edev)
{
	struct pci_dn *pdn = eeh_dev_to_pdn(edev);
	int i;

	/*
	 * Device BARs: 0x10 - 0x18
	 * Bus numbers and windows: 0x18 - 0x30
	 */
	for (i = 4; i < 13; i++)
		eeh_ops->write_config(pdn, i*4, 4, edev->config_space[i]);
	/* Rom: 0x38 */
	eeh_ops->write_config(pdn, 14*4, 4, edev->config_space[14]);

	/* Cache line & Latency timer: 0xC 0xD */
	eeh_ops->write_config(pdn, PCI_CACHE_LINE_SIZE, 1,
			      SAVED_BYTE(PCI_CACHE_LINE_SIZE));
	eeh_ops->write_config(pdn, PCI_LATENCY_TIMER, 1,
			      SAVED_BYTE(PCI_LATENCY_TIMER));
	/* Max latency, min grant, interrupt pin and line: 0x3C */
	eeh_ops->write_config(pdn, 15*4, 4, edev->config_space[15]);

	/* PCI Command: 0x4 */
	eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1] |
			      PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);

	/* Check the PCIe link is ready */
	eeh_bridge_check_link(edev);
}

static void eeh_restore_device_bars(struct eeh_dev *edev)
{
	struct pci_dn *pdn = eeh_dev_to_pdn(edev);
	int i;
	u32 cmd;

	for (i = 4; i < 10; i++)
		eeh_ops->write_config(pdn, i*4, 4, edev->config_space[i]);
	/* 12 == Expansion ROM Address */
	eeh_ops->write_config(pdn, 12*4, 4, edev->config_space[12]);

	eeh_ops->write_config(pdn, PCI_CACHE_LINE_SIZE, 1,
			      SAVED_BYTE(PCI_CACHE_LINE_SIZE));
	eeh_ops->write_config(pdn, PCI_LATENCY_TIMER, 1,
			      SAVED_BYTE(PCI_LATENCY_TIMER));

	/* max latency, min grant, interrupt pin and line */
	eeh_ops->write_config(pdn, 15*4, 4, edev->config_space[15]);

	/*
	 * Restore PERR & SERR bits, some devices require it,
	 * don't touch the other command bits.
	 */
	eeh_ops->read_config(pdn, PCI_COMMAND, 4, &cmd);
	if (edev->config_space[1] & PCI_COMMAND_PARITY)
		cmd |= PCI_COMMAND_PARITY;
	else
		cmd &= ~PCI_COMMAND_PARITY;
	if (edev->config_space[1] & PCI_COMMAND_SERR)
		cmd |= PCI_COMMAND_SERR;
	else
		cmd &= ~PCI_COMMAND_SERR;
	eeh_ops->write_config(pdn, PCI_COMMAND, 4, cmd);
}

/**
 * eeh_restore_one_device_bars - Restore the Base Address Registers for one device
 * @edev: EEH device
 * @flag: Unused
 *
 * Loads the PCI configuration space base address registers,
 * the expansion ROM base address, the latency timer, etc.
 * from the saved values in the device node.
 */
static void *eeh_restore_one_device_bars(struct eeh_dev *edev, void *flag)
{
	struct pci_dn *pdn = eeh_dev_to_pdn(edev);

	/* Do the special restore for bridges */
	if (edev->mode & EEH_DEV_BRIDGE)
		eeh_restore_bridge_bars(edev);
	else
		eeh_restore_device_bars(edev);

	if (eeh_ops->restore_config && pdn)
		eeh_ops->restore_config(pdn);

	return NULL;
}

/**
 * eeh_pe_restore_bars - Restore the PCI config space info
 * @pe: EEH PE
 *
 * This routine performs a recursive walk to the children
 * of this device as well.
 */
void eeh_pe_restore_bars(struct eeh_pe *pe)
{
	/*
	 * We needn't take the EEH lock since eeh_pe_dev_traverse()
	 * will take it.
	 */
	eeh_pe_dev_traverse(pe, eeh_restore_one_device_bars, NULL);
}

/**
 * eeh_pe_loc_get - Retrieve the location code bound to the given PE
 * @pe: EEH PE
 *
 * Retrieve the location code of the given PE. If the primary PE bus
 * is the root bus, the location code is taken from the PHB device
 * tree node or the root port. Otherwise, the device tree node of the
 * upstream bridge of the primary PE bus is checked for the location
 * code.
 */
const char *eeh_pe_loc_get(struct eeh_pe *pe)
{
	struct pci_bus *bus = eeh_pe_bus_get(pe);
	struct device_node *dn;
	const char *loc = NULL;

	while (bus) {
		dn = pci_bus_to_OF_node(bus);
		if (!dn) {
			bus = bus->parent;
			continue;
		}

		if (pci_is_root_bus(bus))
			loc = of_get_property(dn, "ibm,io-base-loc-code", NULL);
		else
			loc = of_get_property(dn, "ibm,slot-location-code",
					      NULL);

		if (loc)
			return loc;

		bus = bus->parent;
	}

	return "N/A";
}

/**
 * eeh_pe_bus_get - Retrieve the PCI bus according to the given PE
 * @pe: EEH PE
 *
 * Retrieve the PCI bus according to the given PE. Basically, there
 * are 3 types of PEs: PHB/Bus/Device. For a PHB PE, the primary PCI
 * bus is returned. For a BUS PE, the parent bus is returned. A
 * DEVICE PE doesn't have an associated PCI bus of its own, so the
 * bus of its first device is used instead.
 */
struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe)
{
	struct eeh_dev *edev;
	struct pci_dev *pdev;

	if (pe->type & EEH_PE_PHB)
		return pe->phb->bus;

	/* The primary bus might be cached during probe time */
	if (pe->state & EEH_PE_PRI_BUS)
		return pe->bus;

	/* Retrieve the parent PCI bus of the first (top) PCI device */
	edev = list_first_entry_or_null(&pe->edevs, struct eeh_dev, entry);
	pdev = eeh_dev_to_pci_dev(edev);
	if (pdev)
		return pdev->bus;

	return NULL;
}