/*
 * This file implements PE (Partitionable Endpoint) support based on
 * information from the platform. Basically, there are 3 types of PEs:
 * PHB/Bus/Device. All PEs are organized as a hierarchy tree, whose
 * first level is associated with the existing PHBs since a particular
 * PE is only meaningful within one PHB domain.
 *
 * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2012.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/delay.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/string.h>

#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>

static int eeh_pe_aux_size = 0;
static LIST_HEAD(eeh_phb_pe);

/**
 * eeh_set_pe_aux_size - Set PE auxiliary data size
 * @size: PE auxiliary data size
 *
 * Set the size of the auxiliary data reserved behind each PE.
 */
void eeh_set_pe_aux_size(int size)
{
	if (size < 0)
		return;

	eeh_pe_aux_size = size;
}

/**
 * eeh_pe_alloc - Allocate PE
 * @phb: PCI controller
 * @type: PE type
 *
 * Allocate a PE instance dynamically.
 */
static struct eeh_pe *eeh_pe_alloc(struct pci_controller *phb, int type)
{
	struct eeh_pe *pe;
	size_t alloc_size;

	alloc_size = sizeof(struct eeh_pe);
	if (eeh_pe_aux_size) {
		alloc_size = ALIGN(alloc_size, cache_line_size());
		alloc_size += eeh_pe_aux_size;
	}

	/* Allocate PE */
	pe = kzalloc(alloc_size, GFP_KERNEL);
	if (!pe)
		return NULL;

	/* Initialize PE */
	pe->type = type;
	pe->phb = phb;
	INIT_LIST_HEAD(&pe->child_list);
	INIT_LIST_HEAD(&pe->child);
	INIT_LIST_HEAD(&pe->edevs);

	/* The auxiliary data sits behind the cache-line-aligned PE */
	pe->data = (void *)pe + ALIGN(sizeof(struct eeh_pe),
				      cache_line_size());
	return pe;
}

/**
 * eeh_phb_pe_create - Create PHB PE
 * @phb: PCI controller
 *
 * The function should be called when the PHB is detected during
 * system boot or PCI hotplug in order to create the PHB PE.
 */
int eeh_phb_pe_create(struct pci_controller *phb)
{
	struct eeh_pe *pe;

	/* Allocate PHB PE */
	pe = eeh_pe_alloc(phb, EEH_PE_PHB);
	if (!pe) {
		pr_err("%s: out of memory!\n", __func__);
		return -ENOMEM;
	}

	/* Put it into the list */
	list_add_tail(&pe->child, &eeh_phb_pe);

	pr_debug("EEH: Add PE for PHB#%d\n", phb->global_number);

	return 0;
}

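/*
 * How a platform is expected to use the auxiliary area: reserve the
 * space once, early, and then treat pe->data as private per-PE storage
 * with no second allocation. A minimal sketch, assuming a hypothetical
 * platform structure (foo_pe_priv and the functions below are for
 * illustration only, not an existing user of this API):
 *
 *	struct foo_pe_priv {
 *		int	fence_count;
 *	};
 *
 *	static void foo_eeh_init(void)
 *	{
 *		eeh_set_pe_aux_size(sizeof(struct foo_pe_priv));
 *	}
 *
 *	static void foo_note_fence(struct eeh_pe *pe)
 *	{
 *		struct foo_pe_priv *priv = pe->data;
 *
 *		priv->fence_count++;
 *	}
 *
 * The call to eeh_set_pe_aux_size() must happen before the first
 * eeh_pe_alloc(), otherwise earlier PEs get no auxiliary space.
 */
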
/**
 * eeh_phb_pe_get - Retrieve PHB PE based on the given PHB
 * @phb: PCI controller
 *
 * All PEs form a hierarchy tree whose first layer is composed of
 * PHB PEs. This function retrieves the PHB PE corresponding to the
 * given PHB.
 */
struct eeh_pe *eeh_phb_pe_get(struct pci_controller *phb)
{
	struct eeh_pe *pe;

	list_for_each_entry(pe, &eeh_phb_pe, child) {
		/*
		 * Actually, we don't need to check the type since
		 * the PE for a PHB was determined when it was
		 * created.
		 */
		if ((pe->type & EEH_PE_PHB) && pe->phb == phb)
			return pe;
	}

	return NULL;
}

/**
 * eeh_pe_next - Retrieve the next PE in the tree
 * @pe: current PE
 * @root: root PE
 *
 * The function is used to retrieve the next PE in the
 * hierarchy PE tree.
 */
static struct eeh_pe *eeh_pe_next(struct eeh_pe *pe,
				  struct eeh_pe *root)
{
	struct list_head *next = pe->child_list.next;

	/*
	 * Pre-order walk: descend to the first child if there is one,
	 * otherwise climb back up until a sibling is found or the
	 * root is reached.
	 */
	if (next == &pe->child_list) {
		while (1) {
			if (pe == root)
				return NULL;
			next = pe->child.next;
			if (next != &pe->parent->child_list)
				break;
			pe = pe->parent;
		}
	}

	return list_entry(next, struct eeh_pe, child);
}

/**
 * eeh_pe_traverse - Traverse PEs in the specified PHB
 * @root: root PE
 * @fn: callback
 * @flag: extra parameter to callback
 *
 * The function is used to traverse the specified PE and its child
 * PEs. The traversal terminates once the callback returns something
 * other than NULL, or when there are no more PEs to visit.
 */
void *eeh_pe_traverse(struct eeh_pe *root,
		      eeh_traverse_func fn, void *flag)
{
	struct eeh_pe *pe;
	void *ret;

	for (pe = root; pe; pe = eeh_pe_next(pe, root)) {
		ret = fn(pe, flag);
		if (ret)
			return ret;
	}

	return NULL;
}

/**
 * eeh_pe_dev_traverse - Traverse the devices from the PE
 * @root: EEH PE
 * @fn: function callback
 * @flag: extra parameter to callback
 *
 * The function is used to traverse the devices of the specified
 * PE and its child PEs.
 */
void *eeh_pe_dev_traverse(struct eeh_pe *root,
			  eeh_traverse_func fn, void *flag)
{
	struct eeh_pe *pe;
	struct eeh_dev *edev, *tmp;
	void *ret;

	if (!root) {
		pr_warn("%s: Invalid PE %p\n",
			__func__, root);
		return NULL;
	}

	/* Traverse the PE tree, visiting each device on each PE */
	for (pe = root; pe; pe = eeh_pe_next(pe, root)) {
		eeh_pe_for_each_dev(pe, edev, tmp) {
			ret = fn(edev, flag);
			if (ret)
				return ret;
		}
	}

	return NULL;
}

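/*
 * Both traversal helpers share one callback convention: the callback
 * is invoked for every element and the walk stops as soon as it
 * returns a non-NULL pointer, which is then handed back to the
 * caller. A minimal sketch of a counting walk (__eeh_pe_count() and
 * eeh_pe_count() are hypothetical, for illustration only):
 *
 *	static void *__eeh_pe_count(void *data, void *flag)
 *	{
 *		int *cnt = flag;
 *
 *		(*cnt)++;
 *		return NULL;
 *	}
 *
 *	static int eeh_pe_count(struct eeh_pe *root)
 *	{
 *		int cnt = 0;
 *
 *		eeh_pe_traverse(root, __eeh_pe_count, &cnt);
 *		return cnt;
 *	}
 *
 * Returning NULL from the callback means "keep walking"; the first
 * non-NULL return value short-circuits the traversal.
 */
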
/**
 * __eeh_pe_get - Check the PE address
 * @data: EEH PE
 * @flag: EEH device
 *
 * A particular PE can be identified by its PE address or by the
 * traditional BDF address, which is composed of the Bus/Device/
 * Function number. The extra data referred to by @flag indicates
 * which type of address should be used.
 */
static void *__eeh_pe_get(void *data, void *flag)
{
	struct eeh_pe *pe = (struct eeh_pe *)data;
	struct eeh_dev *edev = (struct eeh_dev *)flag;

	/* Unexpected PHB PE */
	if (pe->type & EEH_PE_PHB)
		return NULL;

	/*
	 * We prefer the PE address. In most cases the PE address
	 * is non-zero.
	 */
	if (eeh_has_flag(EEH_VALID_PE_ZERO)) {
		if (edev->pe_config_addr == pe->addr)
			return pe;
	} else {
		if (edev->pe_config_addr &&
		    (edev->pe_config_addr == pe->addr))
			return pe;
	}

	/* Try the BDF address */
	if (edev->config_addr &&
	    (edev->config_addr == pe->config_addr))
		return pe;

	return NULL;
}

/**
 * eeh_pe_get - Search PE based on the given address
 * @edev: EEH device
 *
 * Search for the corresponding PE based on the address carried by
 * the EEH device. The function is used to check whether the
 * associated PE has already been created for the PE address. Note
 * that the PE address has two formats: the traditional PE address,
 * which is composed of the PCI bus/device/function number, or the
 * unified PE address.
 */
struct eeh_pe *eeh_pe_get(struct eeh_dev *edev)
{
	struct eeh_pe *root = eeh_phb_pe_get(edev->phb);
	struct eeh_pe *pe;

	pe = eeh_pe_traverse(root, __eeh_pe_get, edev);

	return pe;
}

/**
 * eeh_pe_get_parent - Retrieve the parent PE
 * @edev: EEH device
 *
 * All PEs in the system are organized as a hierarchy tree. The
 * function retrieves the parent PE according to the parent EEH
 * device.
 */
static struct eeh_pe *eeh_pe_get_parent(struct eeh_dev *edev)
{
	struct eeh_dev *parent;
	struct pci_dn *pdn = eeh_dev_to_pdn(edev);

	/*
	 * It's possible that an indirect parent EEH device already
	 * has an associated PE while the direct parent EEH device
	 * doesn't have one yet.
	 */
	if (edev->physfn)
		pdn = pci_get_pdn(edev->physfn);
	else
		pdn = pdn ? pdn->parent : NULL;
	while (pdn) {
		/* We're poking out of PCI territory */
		parent = pdn_to_eeh_dev(pdn);
		if (!parent)
			return NULL;

		if (parent->pe)
			return parent->pe;

		pdn = pdn->parent;
	}

	return NULL;
}

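/*
 * Putting the two lookups together: a PE matches an edev when either
 * its PE address or its config (BDF) address agrees with the edev.
 * A hedged sketch of what the match keys look like (values are
 * illustrative only; config_addr is (bus << 8) | devfn, as assumed by
 * the pr_debug() calls below):
 *
 *	edev->pe_config_addr = 0x5;	(platform-assigned PE number)
 *	edev->config_addr    = 0x0100;	(bus 0x01, device 0x00, func 0)
 *
 *	pe = eeh_pe_get(edev);		(NULL unless some PE under the
 *					 PHB carries addr 0x5 or
 *					 config_addr 0x0100)
 *
 * With EEH_VALID_PE_ZERO set, a pe_config_addr of zero is treated as
 * a real PE number rather than as "not assigned yet".
 */
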
/**
 * eeh_add_to_parent_pe - Add EEH device to parent PE
 * @edev: EEH device
 *
 * Add the EEH device to the parent PE. If the parent PE already
 * exists, the PE type will be changed to EEH_PE_BUS. Otherwise, we
 * have to create a new PE to hold the EEH device, and the new PE
 * will be linked to its parent PE as well.
 */
int eeh_add_to_parent_pe(struct eeh_dev *edev)
{
	struct eeh_pe *pe, *parent;

	/* Check if the PE number is valid */
	if (!eeh_has_flag(EEH_VALID_PE_ZERO) && !edev->pe_config_addr) {
		pr_err("%s: Invalid PE#0 for edev 0x%x on PHB#%d\n",
		       __func__, edev->config_addr, edev->phb->global_number);
		return -EINVAL;
	}

	/*
	 * Check whether the PE already exists according to the PE
	 * address. If it does, the PE should be composed of a PCI
	 * bus and its subordinate components.
	 */
	pe = eeh_pe_get(edev);
	if (pe && !(pe->type & EEH_PE_INVALID)) {
		/* Mark the PE as a PCI bus type */
		pe->type = EEH_PE_BUS;
		edev->pe = pe;

		/* Put the edev into the PE */
		list_add_tail(&edev->list, &pe->edevs);
		pr_debug("EEH: Add %04x:%02x:%02x.%01x to Bus PE#%x\n",
			 edev->phb->global_number,
			 edev->config_addr >> 8,
			 PCI_SLOT(edev->config_addr & 0xFF),
			 PCI_FUNC(edev->config_addr & 0xFF),
			 pe->addr);
		return 0;
	} else if (pe && (pe->type & EEH_PE_INVALID)) {
		list_add_tail(&edev->list, &pe->edevs);
		edev->pe = pe;
		/*
		 * We get here because of PCI hotplug triggered by
		 * EEH recovery. We need to clear EEH_PE_INVALID all
		 * the way up to the top.
		 */
		parent = pe;
		while (parent) {
			if (!(parent->type & EEH_PE_INVALID))
				break;
			parent->type &= ~(EEH_PE_INVALID | EEH_PE_KEEP);
			parent = parent->parent;
		}

		pr_debug("EEH: Add %04x:%02x:%02x.%01x to Device "
			 "PE#%x, Parent PE#%x\n",
			 edev->phb->global_number,
			 edev->config_addr >> 8,
			 PCI_SLOT(edev->config_addr & 0xFF),
			 PCI_FUNC(edev->config_addr & 0xFF),
			 pe->addr, pe->parent->addr);
		return 0;
	}

	/* Create a new EEH PE */
	if (edev->physfn)
		pe = eeh_pe_alloc(edev->phb, EEH_PE_VF);
	else
		pe = eeh_pe_alloc(edev->phb, EEH_PE_DEVICE);
	if (!pe) {
		pr_err("%s: out of memory!\n", __func__);
		return -ENOMEM;
	}
	pe->addr = edev->pe_config_addr;
	pe->config_addr = edev->config_addr;

	/*
	 * Put the new EEH PE into the hierarchy tree. If the parent
	 * can't be found, the newly created PE will be attached to
	 * the PHB directly. Otherwise, we have to associate the PE
	 * with its parent.
	 */
	parent = eeh_pe_get_parent(edev);
	if (!parent) {
		parent = eeh_phb_pe_get(edev->phb);
		if (!parent) {
			pr_err("%s: No PHB PE is found (PHB Domain=%d)\n",
			       __func__, edev->phb->global_number);
			edev->pe = NULL;
			kfree(pe);
			return -EEXIST;
		}
	}
	pe->parent = parent;

	/*
	 * Put the newly created PE into the child list and
	 * link the EEH device accordingly.
	 */
	list_add_tail(&pe->child, &parent->child_list);
	list_add_tail(&edev->list, &pe->edevs);
	edev->pe = pe;
	pr_debug("EEH: Add %04x:%02x:%02x.%01x to "
		 "Device PE#%x, Parent PE#%x\n",
		 edev->phb->global_number,
		 edev->config_addr >> 8,
		 PCI_SLOT(edev->config_addr & 0xFF),
		 PCI_FUNC(edev->config_addr & 0xFF),
		 pe->addr, pe->parent->addr);

	return 0;
}

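/*
 * A hedged sketch of how the add path is typically driven from a
 * platform probe routine once the addresses have been resolved,
 * with config_addr in (bus << 8) | devfn form and pe_config_addr the
 * platform PE number (foo_eeh_probe() and the values are illustrative
 * only, not an existing caller):
 *
 *	static void foo_eeh_probe(struct eeh_dev *edev)
 *	{
 *		edev->config_addr    = 0x0100;
 *		edev->pe_config_addr = 0x5;
 *
 *		if (eeh_add_to_parent_pe(edev))
 *			pr_warn("EEH: failed to add edev to a PE\n");
 *	}
 *
 * The matching teardown for hot-unplug or recovery is
 * eeh_rmv_from_parent_pe() below.
 */
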
/**
 * eeh_rmv_from_parent_pe - Remove one EEH device from the associated PE
 * @edev: EEH device
 *
 * The PE hierarchy tree might be changed when doing PCI hotplug.
 * Also, the PCI devices or buses could be removed from the system
 * during EEH recovery. So we have to call this function to remove
 * the corresponding PE accordingly if necessary.
 */
int eeh_rmv_from_parent_pe(struct eeh_dev *edev)
{
	struct eeh_pe *pe, *parent, *child;
	int cnt;

	if (!edev->pe) {
		pr_debug("%s: No PE found for device %04x:%02x:%02x.%01x\n",
			 __func__, edev->phb->global_number,
			 edev->config_addr >> 8,
			 PCI_SLOT(edev->config_addr & 0xFF),
			 PCI_FUNC(edev->config_addr & 0xFF));
		return -EEXIST;
	}

	/* Remove the EEH device */
	pe = eeh_dev_to_pe(edev);
	edev->pe = NULL;
	list_del(&edev->list);

	/*
	 * Check if the parent PE includes any EEH devices.
	 * If not, we should delete it. Also, we should delete
	 * the parent PE if it doesn't have associated child
	 * PEs and EEH devices.
	 */
	while (1) {
		parent = pe->parent;
		if (pe->type & EEH_PE_PHB)
			break;

		if (!(pe->state & EEH_PE_KEEP)) {
			if (list_empty(&pe->edevs) &&
			    list_empty(&pe->child_list)) {
				list_del(&pe->child);
				kfree(pe);
			} else {
				break;
			}
		} else {
			if (list_empty(&pe->edevs)) {
				cnt = 0;
				list_for_each_entry(child, &pe->child_list, child) {
					if (!(child->type & EEH_PE_INVALID)) {
						cnt++;
						break;
					}
				}

				if (!cnt)
					pe->type |= EEH_PE_INVALID;
				else
					break;
			}
		}

		pe = parent;
	}

	return 0;
}

/**
 * eeh_pe_update_time_stamp - Update PE's frozen time stamp
 * @pe: EEH PE
 *
 * We keep a time stamp for each PE to trace how often it has been
 * frozen within the last hour. This function should be called to
 * update the time stamp on the first error of the specific PE; we
 * needn't account for errors that happened more than an hour ago.
 */
void eeh_pe_update_time_stamp(struct eeh_pe *pe)
{
	struct timeval tstamp;

	if (!pe)
		return;

	if (pe->freeze_count <= 0) {
		pe->freeze_count = 0;
		do_gettimeofday(&pe->tstamp);
	} else {
		do_gettimeofday(&tstamp);
		if (tstamp.tv_sec - pe->tstamp.tv_sec > 3600) {
			pe->tstamp = tstamp;
			pe->freeze_count = 0;
		}
	}
}

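/*
 * The (tstamp, freeze_count) pair implements a coarse one-hour
 * window: freezes only accumulate while they land within 3600
 * seconds of the first one, after which the window restarts. A
 * worked example (times are illustrative; the actual freeze_count
 * increment is done by the recovery path outside this file):
 *
 *	t = 0s     first freeze   tstamp <- 0s, count left at 0,
 *	                          then bumped to 1 by the caller
 *	t = 1800s  second freeze  window still open, count -> 2
 *	t = 7200s  third freeze   7200 - 0 > 3600, so tstamp <- 7200s
 *	                          and the count is reset before -> 1
 */
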
/**
 * __eeh_pe_state_mark - Mark the state for the PE
 * @data: EEH PE
 * @flag: state
 *
 * The function is used to mark the indicated state for the given
 * PE. Also, the associated PCI devices will be put into the IO
 * frozen state as well.
 */
static void *__eeh_pe_state_mark(void *data, void *flag)
{
	struct eeh_pe *pe = (struct eeh_pe *)data;
	int state = *((int *)flag);
	struct eeh_dev *edev, *tmp;
	struct pci_dev *pdev;

	/* Keep the state of a permanently removed PE intact */
	if (pe->state & EEH_PE_REMOVED)
		return NULL;

	pe->state |= state;

	/* Offline PCI devices if applicable */
	if (!(state & EEH_PE_ISOLATED))
		return NULL;

	eeh_pe_for_each_dev(pe, edev, tmp) {
		pdev = eeh_dev_to_pci_dev(edev);
		if (pdev)
			pdev->error_state = pci_channel_io_frozen;
	}

	/* Block PCI config access if required */
	if (pe->state & EEH_PE_CFG_RESTRICTED)
		pe->state |= EEH_PE_CFG_BLOCKED;

	return NULL;
}

/**
 * eeh_pe_state_mark - Mark specified state for PE and its associated device
 * @pe: EEH PE
 * @state: state to be marked
 *
 * An EEH error affects the current PE and its child PEs. The function
 * is used to mark the appropriate state for the affected PEs and the
 * associated devices.
 */
void eeh_pe_state_mark(struct eeh_pe *pe, int state)
{
	eeh_pe_traverse(pe, __eeh_pe_state_mark, &state);
}
EXPORT_SYMBOL_GPL(eeh_pe_state_mark);

static void *__eeh_pe_dev_mode_mark(void *data, void *flag)
{
	struct eeh_dev *edev = data;
	int mode = *((int *)flag);

	edev->mode |= mode;

	return NULL;
}

/**
 * eeh_pe_dev_mode_mark - Mark mode for all devices under the PE
 * @pe: EEH PE
 * @mode: EEH device mode
 *
 * Mark the specified mode for all child devices of the PE.
 */
void eeh_pe_dev_mode_mark(struct eeh_pe *pe, int mode)
{
	eeh_pe_dev_traverse(pe, __eeh_pe_dev_mode_mark, &mode);
}

/**
 * __eeh_pe_state_clear - Clear state for the PE
 * @data: EEH PE
 * @flag: state
 *
 * The function is used to clear the indicated state from the
 * given PE. Besides, we also clear the check count of the PE
 * as well.
 */
static void *__eeh_pe_state_clear(void *data, void *flag)
{
	struct eeh_pe *pe = (struct eeh_pe *)data;
	int state = *((int *)flag);
	struct eeh_dev *edev, *tmp;
	struct pci_dev *pdev;

	/* Keep the state of a permanently removed PE intact */
	if (pe->state & EEH_PE_REMOVED)
		return NULL;

	pe->state &= ~state;

	/*
	 * Special treatment when clearing the isolated state: clear
	 * the check count since the last isolation and put all
	 * affected devices back into the normal state.
	 */
	if (!(state & EEH_PE_ISOLATED))
		return NULL;

	pe->check_count = 0;
	eeh_pe_for_each_dev(pe, edev, tmp) {
		pdev = eeh_dev_to_pci_dev(edev);
		if (!pdev)
			continue;

		pdev->error_state = pci_channel_io_normal;
	}

	/* Unblock PCI config access if required */
	if (pe->state & EEH_PE_CFG_RESTRICTED)
		pe->state &= ~EEH_PE_CFG_BLOCKED;

	return NULL;
}

/**
 * eeh_pe_state_clear - Clear state for the PE and its children
 * @pe: PE
 * @state: state to be cleared
 *
 * When the PE and its children have been recovered from an error,
 * we need to clear the error state for them. The function is used
 * for that purpose.
 */
void eeh_pe_state_clear(struct eeh_pe *pe, int state)
{
	eeh_pe_traverse(pe, __eeh_pe_state_clear, &state);
}

/**
 * eeh_pe_state_mark_with_cfg - Mark PE state with unblocked config space
 * @pe: PE
 * @state: PE state to be set
 *
 * Set the specified flag on the PE and its child PEs. The PCI config
 * space of some PEs is blocked automatically when EEH_PE_ISOLATED is
 * set, which isn't needed in some situations. The function allows
 * setting the specified flag on the indicated PEs without blocking
 * their PCI config space.
 */
void eeh_pe_state_mark_with_cfg(struct eeh_pe *pe, int state)
{
	eeh_pe_traverse(pe, __eeh_pe_state_mark, &state);
	if (!(state & EEH_PE_ISOLATED))
		return;

	/* Clear EEH_PE_CFG_BLOCKED, which might have been set just now */
	state = EEH_PE_CFG_BLOCKED;
	eeh_pe_traverse(pe, __eeh_pe_state_clear, &state);
}

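/*
 * Typical usage of the mark/clear helpers from the recovery code is
 * to bracket the error handling of a subtree. A hedged sketch (not
 * lifted verbatim from the recovery path):
 *
 *	eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
 *
 *	... reset and re-enable the PE ...
 *
 *	eeh_pe_state_clear(pe, EEH_PE_ISOLATED);
 *
 * Marking EEH_PE_ISOLATED freezes the error_state of every pci_dev
 * in the subtree and, on EEH_PE_CFG_RESTRICTED platforms, also
 * blocks config space; clearing it undoes both and resets
 * check_count.
 */
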
/*
 * Some PCI bridges (e.g. PLX bridges) have primary/secondary
 * buses assigned explicitly by firmware, and we probably have
 * lost that after a reset. So we have to delay the check until
 * the PCI-CFG registers have been restored for the parent
 * bridge.
 *
 * Don't use the normal PCI-CFG accessors, which have probably
 * been blocked on the normal path during this stage. Instead we
 * need to use the eeh operations, which are always permitted.
 */
static void eeh_bridge_check_link(struct eeh_dev *edev)
{
	struct pci_dn *pdn = eeh_dev_to_pdn(edev);
	int cap;
	uint32_t val;
	int timeout = 0;

	/*
	 * We only check root ports and downstream ports of
	 * PCIe switches
	 */
	if (!(edev->mode & (EEH_DEV_ROOT_PORT | EEH_DEV_DS_PORT)))
		return;

	pr_debug("%s: Check PCIe link for %04x:%02x:%02x.%01x ...\n",
		 __func__, edev->phb->global_number,
		 edev->config_addr >> 8,
		 PCI_SLOT(edev->config_addr & 0xFF),
		 PCI_FUNC(edev->config_addr & 0xFF));

	/* Check slot status */
	cap = edev->pcie_cap;
	eeh_ops->read_config(pdn, cap + PCI_EXP_SLTSTA, 2, &val);
	if (!(val & PCI_EXP_SLTSTA_PDS)) {
		pr_debug(" No card in the slot (0x%04x) !\n", val);
		return;
	}

	/* Check power status if we have the capability */
	eeh_ops->read_config(pdn, cap + PCI_EXP_SLTCAP, 2, &val);
	if (val & PCI_EXP_SLTCAP_PCP) {
		eeh_ops->read_config(pdn, cap + PCI_EXP_SLTCTL, 2, &val);
		if (val & PCI_EXP_SLTCTL_PCC) {
			pr_debug(" In power-off state, power it on ...\n");
			val &= ~(PCI_EXP_SLTCTL_PCC | PCI_EXP_SLTCTL_PIC);
			val |= (0x0100 & PCI_EXP_SLTCTL_PIC);
			eeh_ops->write_config(pdn, cap + PCI_EXP_SLTCTL, 2, val);
			msleep(2 * 1000);
		}
	}

	/* Enable link */
	eeh_ops->read_config(pdn, cap + PCI_EXP_LNKCTL, 2, &val);
	val &= ~PCI_EXP_LNKCTL_LD;
	eeh_ops->write_config(pdn, cap + PCI_EXP_LNKCTL, 2, val);

	/* Check link */
	eeh_ops->read_config(pdn, cap + PCI_EXP_LNKCAP, 4, &val);
	if (!(val & PCI_EXP_LNKCAP_DLLLARC)) {
		pr_debug(" No link reporting capability (0x%08x) \n", val);
		msleep(1000);
		return;
	}

	/* Wait up to 5s for the link to come up */
	timeout = 0;
	while (timeout < 5000) {
		msleep(20);
		timeout += 20;

		eeh_ops->read_config(pdn, cap + PCI_EXP_LNKSTA, 2, &val);
		if (val & PCI_EXP_LNKSTA_DLLLA)
			break;
	}

	if (val & PCI_EXP_LNKSTA_DLLLA)
		pr_debug(" Link up (%s)\n",
			 (val & PCI_EXP_LNKSTA_CLS_2_5GB) ? "2.5GB" : "5GB");
	else
		pr_debug(" Link not ready (0x%04x)\n", val);
}

/*
 * The saved config space is stored as 32-bit words in
 * edev->config_space[]; SAVED_BYTE() picks a single byte out of the
 * word that contains it, with the offset mirrored within the word:
 * BYTE_SWAP(OFF) = 8*(OFF/4) + 3 - OFF, i.e. byte 4k+j lands at byte
 * 4k+(3-j). For example, PCI_CACHE_LINE_SIZE (0x0c) maps to byte 15
 * and PCI_LATENCY_TIMER (0x0d) to byte 14, both inside config_space[3].
 */
#define BYTE_SWAP(OFF)	(8*((OFF)/4)+3-(OFF))
#define SAVED_BYTE(OFF)	(((u8 *)(edev->config_space))[BYTE_SWAP(OFF)])

static void eeh_restore_bridge_bars(struct eeh_dev *edev)
{
	struct pci_dn *pdn = eeh_dev_to_pdn(edev);
	int i;

	/*
	 * Device BARs: 0x10 - 0x18
	 * Bus numbers and windows: 0x18 - 0x30
	 */
	for (i = 4; i < 13; i++)
		eeh_ops->write_config(pdn, i*4, 4, edev->config_space[i]);
	/* Rom: 0x38 */
	eeh_ops->write_config(pdn, 14*4, 4, edev->config_space[14]);

	/* Cache line & Latency timer: 0xC 0xD */
	eeh_ops->write_config(pdn, PCI_CACHE_LINE_SIZE, 1,
			      SAVED_BYTE(PCI_CACHE_LINE_SIZE));
	eeh_ops->write_config(pdn, PCI_LATENCY_TIMER, 1,
			      SAVED_BYTE(PCI_LATENCY_TIMER));
	/* Max latency, min grant, interrupt pin and line: 0x3C */
	eeh_ops->write_config(pdn, 15*4, 4, edev->config_space[15]);

	/* PCI Command: 0x4 */
	eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1]);

	/* Check that the PCIe link is ready */
	eeh_bridge_check_link(edev);
}

static void eeh_restore_device_bars(struct eeh_dev *edev)
{
	struct pci_dn *pdn = eeh_dev_to_pdn(edev);
	int i;
	u32 cmd;

	for (i = 4; i < 10; i++)
		eeh_ops->write_config(pdn, i*4, 4, edev->config_space[i]);
	/* 12 == Expansion ROM Address */
	eeh_ops->write_config(pdn, 12*4, 4, edev->config_space[12]);

	eeh_ops->write_config(pdn, PCI_CACHE_LINE_SIZE, 1,
			      SAVED_BYTE(PCI_CACHE_LINE_SIZE));
	eeh_ops->write_config(pdn, PCI_LATENCY_TIMER, 1,
			      SAVED_BYTE(PCI_LATENCY_TIMER));

	/* max latency, min grant, interrupt pin and line */
	eeh_ops->write_config(pdn, 15*4, 4, edev->config_space[15]);

	/*
	 * Restore PERR & SERR bits, some devices require it,
	 * don't touch the other command bits
	 */
	eeh_ops->read_config(pdn, PCI_COMMAND, 4, &cmd);
	if (edev->config_space[1] & PCI_COMMAND_PARITY)
		cmd |= PCI_COMMAND_PARITY;
	else
		cmd &= ~PCI_COMMAND_PARITY;
	if (edev->config_space[1] & PCI_COMMAND_SERR)
		cmd |= PCI_COMMAND_SERR;
	else
		cmd &= ~PCI_COMMAND_SERR;
	eeh_ops->write_config(pdn, PCI_COMMAND, 4, cmd);
}

/**
 * eeh_restore_one_device_bars - Restore the Base Address Registers for one device
 * @data: EEH device
 * @flag: Unused
 *
 * Loads the PCI configuration space base address registers,
 * the expansion ROM base address, the latency timer, etc.
 * from the saved values in the device node.
 */
static void *eeh_restore_one_device_bars(void *data, void *flag)
{
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dn *pdn = eeh_dev_to_pdn(edev);

	/* Do special restore for bridges */
	if (edev->mode & EEH_DEV_BRIDGE)
		eeh_restore_bridge_bars(edev);
	else
		eeh_restore_device_bars(edev);

	if (eeh_ops->restore_config && pdn)
		eeh_ops->restore_config(pdn);

	return NULL;
}

/**
 * eeh_pe_restore_bars - Restore the PCI config space info
 * @pe: EEH PE
 *
 * This routine performs a recursive walk to the children
 * of this device as well.
 */
void eeh_pe_restore_bars(struct eeh_pe *pe)
{
	/*
	 * We don't need to take the EEH lock here since
	 * eeh_pe_dev_traverse() will take it.
	 */
	eeh_pe_dev_traverse(pe, eeh_restore_one_device_bars, NULL);
}

/**
 * eeh_pe_loc_get - Retrieve the location code bound to the given PE
 * @pe: EEH PE
 *
 * Retrieve the location code of the given PE. If the primary PE bus
 * is the root bus, we will grab the location code from the PHB
 * device tree node or root port. Otherwise, the upstream bridge's
 * device tree node of the primary PE bus will be checked for the
 * location code.
 */
const char *eeh_pe_loc_get(struct eeh_pe *pe)
{
	struct pci_bus *bus = eeh_pe_bus_get(pe);
	struct device_node *dn;
	const char *loc = NULL;

	while (bus) {
		dn = pci_bus_to_OF_node(bus);
		if (!dn) {
			bus = bus->parent;
			continue;
		}

		if (pci_is_root_bus(bus))
			loc = of_get_property(dn, "ibm,io-base-loc-code", NULL);
		else
			loc = of_get_property(dn, "ibm,slot-location-code",
					      NULL);

		if (loc)
			return loc;

		bus = bus->parent;
	}

	return "N/A";
}

/**
 * eeh_pe_bus_get - Retrieve PCI bus according to the given PE
 * @pe: EEH PE
 *
 * Retrieve the PCI bus according to the given PE. Basically,
 * there are 3 types of PEs: PHB/Bus/Device. For a PHB PE, the
 * primary PCI bus will be retrieved. The parent bus will be
 * returned for a BUS PE. However, we don't have an associated
 * PCI bus for a DEVICE PE.
 */
struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe)
{
	struct eeh_dev *edev;
	struct pci_dev *pdev;

	if (pe->type & EEH_PE_PHB)
		return pe->phb->bus;

	/* The primary bus might be cached during probe time */
	if (pe->state & EEH_PE_PRI_BUS)
		return pe->bus;

	/* Retrieve the parent PCI bus of the first (top) PCI device */
	edev = list_first_entry_or_null(&pe->edevs, struct eeh_dev, list);
	pdev = eeh_dev_to_pci_dev(edev);
	if (pdev)
		return pdev->bus;

	return NULL;
}
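/*
 * The two lookups above are what error reporting typically leans on
 * when printing something actionable for the user. A hedged sketch
 * (the message format is illustrative only, not the exact text used
 * by the recovery code):
 *
 *	struct pci_bus *bus = eeh_pe_bus_get(pe);
 *
 *	pr_err("EEH: Frozen PE#%x on %s, location: %s\n",
 *	       pe->addr,
 *	       bus ? bus->name : "<no bus>",
 *	       eeh_pe_loc_get(pe));
 *
 * eeh_pe_loc_get() never returns NULL (it falls back to "N/A"), so it
 * is safe to feed straight into a format string.
 */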