/*
 * This file implements PE (Partitionable Endpoint) support based on
 * the information from the platform. Basically, there are 3 types of
 * PEs: PHB/Bus/Device. All PEs are organized as a hierarchy tree. The
 * first level of the tree is associated with the existing PHBs, since
 * a particular PE is only meaningful within one PHB domain.
 *
 * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2012.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/delay.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/string.h>

#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>

static LIST_HEAD(eeh_phb_pe);

/**
 * eeh_pe_alloc - Allocate PE
 * @phb: PCI controller
 * @type: PE type
 *
 * Allocate a PE instance dynamically.
 */
static struct eeh_pe *eeh_pe_alloc(struct pci_controller *phb, int type)
{
	struct eeh_pe *pe;

	/* Allocate PE */
	pe = kzalloc(sizeof(struct eeh_pe), GFP_KERNEL);
	if (!pe)
		return NULL;

	/* Initialize PE */
	pe->type = type;
	pe->phb = phb;
	INIT_LIST_HEAD(&pe->child_list);
	INIT_LIST_HEAD(&pe->child);
	INIT_LIST_HEAD(&pe->edevs);

	return pe;
}

/**
 * eeh_phb_pe_create - Create PHB PE
 * @phb: PCI controller
 *
 * The function should be called when a PHB is detected during
 * system boot or PCI hotplug in order to create its PHB PE.
 */
int eeh_phb_pe_create(struct pci_controller *phb)
{
	struct eeh_pe *pe;

	/* Allocate PHB PE */
	pe = eeh_pe_alloc(phb, EEH_PE_PHB);
	if (!pe) {
		pr_err("%s: out of memory!\n", __func__);
		return -ENOMEM;
	}

	/* Put it into the list */
	list_add_tail(&pe->child, &eeh_phb_pe);

	pr_debug("EEH: Add PE for PHB#%d\n", phb->global_number);

	return 0;
}

/**
 * eeh_phb_pe_get - Retrieve PHB PE based on the given PHB
 * @phb: PCI controller
 *
 * All PEs form a hierarchy tree whose first layer is composed of
 * PHB PEs. The function is used to retrieve the PHB PE that
 * corresponds to the given PHB.
 */
struct eeh_pe *eeh_phb_pe_get(struct pci_controller *phb)
{
	struct eeh_pe *pe;

	list_for_each_entry(pe, &eeh_phb_pe, child) {
		/*
		 * Actually, we needn't check the type since
		 * the PE for a PHB was determined when it
		 * was created.
		 */
		if ((pe->type & EEH_PE_PHB) && pe->phb == phb)
			return pe;
	}

	return NULL;
}

/**
 * eeh_pe_next - Retrieve the next PE in the tree
 * @pe: current PE
 * @root: root PE
 *
 * The function is used to retrieve the next PE in the
 * hierarchy PE tree.
 */
static struct eeh_pe *eeh_pe_next(struct eeh_pe *pe,
				  struct eeh_pe *root)
{
	struct list_head *next = pe->child_list.next;

	if (next == &pe->child_list) {
		while (1) {
			if (pe == root)
				return NULL;
			next = pe->child.next;
			if (next != &pe->parent->child_list)
				break;
			pe = pe->parent;
		}
	}

	return list_entry(next, struct eeh_pe, child);
}

/**
 * eeh_pe_traverse - Traverse PEs in the specified PHB
 * @root: root PE
 * @fn: callback
 * @flag: extra parameter to callback
 *
 * The function is used to traverse the specified PE and its
 * child PEs. The traversal terminates once the callback
 * returns a non-NULL value, or when there are no more PEs
 * to traverse.
 */
void *eeh_pe_traverse(struct eeh_pe *root,
		      eeh_traverse_func fn, void *flag)
{
	struct eeh_pe *pe;
	void *ret;

	for (pe = root; pe; pe = eeh_pe_next(pe, root)) {
		ret = fn(pe, flag);
		if (ret)
			return ret;
	}

	return NULL;
}

/**
 * eeh_pe_dev_traverse - Traverse the devices from the PE
 * @root: EEH PE
 * @fn: function callback
 * @flag: extra parameter to callback
 *
 * The function is used to traverse the devices of the specified
 * PE and its child PEs.
 */
void *eeh_pe_dev_traverse(struct eeh_pe *root,
			  eeh_traverse_func fn, void *flag)
{
	struct eeh_pe *pe;
	struct eeh_dev *edev, *tmp;
	void *ret;

	if (!root) {
		pr_warning("%s: Invalid PE %p\n", __func__, root);
		return NULL;
	}

	/* Traverse the root PE and its children */
	for (pe = root; pe; pe = eeh_pe_next(pe, root)) {
		eeh_pe_for_each_dev(pe, edev, tmp) {
			ret = fn(edev, flag);
			if (ret)
				return ret;
		}
	}

	return NULL;
}

/**
 * __eeh_pe_get - Check the PE address
 * @data: EEH PE
 * @flag: EEH device
 *
 * A particular PE can be identified by its PE address or by the
 * traditional BDF address, which is composed of the
 * Bus/Device/Function number. The extra data referred to by @flag
 * indicates which type of address should be used.
 */
static void *__eeh_pe_get(void *data, void *flag)
{
	struct eeh_pe *pe = (struct eeh_pe *)data;
	struct eeh_dev *edev = (struct eeh_dev *)flag;

	/* Unexpected PHB PE */
	if (pe->type & EEH_PE_PHB)
		return NULL;

	/* We prefer PE address */
	if (edev->pe_config_addr &&
	    (edev->pe_config_addr == pe->addr))
		return pe;

	/* Try BDF address */
	if (edev->config_addr &&
	    (edev->config_addr == pe->config_addr))
		return pe;

	return NULL;
}

/**
 * eeh_pe_get - Search PE based on the given address
 * @edev: EEH device
 *
 * Search for the corresponding PE based on the specified address,
 * which is included in the EEH device. The function is used to check
 * whether a PE has already been created for the PE address. Note
 * that a PE address has two formats: the traditional PE address,
 * which is composed of the PCI bus/device/function number, or the
 * unified PE address.
 */
struct eeh_pe *eeh_pe_get(struct eeh_dev *edev)
{
	struct eeh_pe *root = eeh_phb_pe_get(edev->phb);
	struct eeh_pe *pe;

	pe = eeh_pe_traverse(root, __eeh_pe_get, edev);

	return pe;
}

/**
 * eeh_pe_get_parent - Retrieve the parent PE
 * @edev: EEH device
 *
 * All PEs in the system are organized as a hierarchy tree.
 * The function is used to retrieve the parent PE by walking up
 * the EEH device's parent device nodes.
 */
static struct eeh_pe *eeh_pe_get_parent(struct eeh_dev *edev)
{
	struct device_node *dn;
	struct eeh_dev *parent;

	/*
	 * It might be the case that an indirect parent EEH device
	 * already has an associated PE, while the direct parent
	 * EEH device doesn't have one yet.
	 */
	dn = edev->dn->parent;
	while (dn) {
		/* We're poking out of PCI territory */
		if (!PCI_DN(dn))
			return NULL;

		parent = of_node_to_eeh_dev(dn);
		/* We're poking out of PCI territory */
		if (!parent)
			return NULL;

		if (parent->pe)
			return parent->pe;

		dn = dn->parent;
	}

	return NULL;
}

/**
 * eeh_add_to_parent_pe - Add EEH device to parent PE
 * @edev: EEH device
 *
 * Add the EEH device to the parent PE. If the parent PE already
 * exists, the PE type will be changed to EEH_PE_BUS. Otherwise,
 * we have to create a new PE to hold the EEH device, and the new
 * PE will be linked to its parent PE as well.
 */
int eeh_add_to_parent_pe(struct eeh_dev *edev)
{
	struct eeh_pe *pe, *parent;

	/*
	 * Search for an existing PE according to the PE address.
	 * If one already exists, the PE should be composed of a
	 * PCI bus and its subordinate components.
	 */
	pe = eeh_pe_get(edev);
	if (pe && !(pe->type & EEH_PE_INVALID)) {
		if (!edev->pe_config_addr) {
			pr_err("%s: PE with addr 0x%x already exists\n",
			       __func__, edev->config_addr);
			return -EEXIST;
		}

		/* Mark the PE as PCI bus type */
		pe->type = EEH_PE_BUS;
		edev->pe = pe;

		/* Put the edev into the PE */
		list_add_tail(&edev->list, &pe->edevs);
		pr_debug("EEH: Add %s to Bus PE#%x\n",
			 edev->dn->full_name, pe->addr);

		return 0;
	} else if (pe && (pe->type & EEH_PE_INVALID)) {
		list_add_tail(&edev->list, &pe->edevs);
		edev->pe = pe;
		/*
		 * We get here because of PCI hotplug triggered by
		 * EEH recovery. We need to clear EEH_PE_INVALID all
		 * the way up to the top.
		 */
		parent = pe;
		while (parent) {
			if (!(parent->type & EEH_PE_INVALID))
				break;
			parent->type &= ~(EEH_PE_INVALID | EEH_PE_KEEP);
			parent = parent->parent;
		}
		pr_debug("EEH: Add %s to Device PE#%x, Parent PE#%x\n",
			 edev->dn->full_name, pe->addr, pe->parent->addr);

		return 0;
	}

	/* Create a new EEH PE */
	pe = eeh_pe_alloc(edev->phb, EEH_PE_DEVICE);
	if (!pe) {
		pr_err("%s: out of memory!\n", __func__);
		return -ENOMEM;
	}
	pe->addr = edev->pe_config_addr;
	pe->config_addr = edev->config_addr;

	/*
	 * While doing PE reset, we probably hot-reset the
	 * upstream bridge. However, the PCI devices including
	 * the associated EEH devices might be removed while the
	 * EEH core is doing recovery, so it isn't safe to retrieve
	 * the bridge through a downstream EEH device. We have to
	 * trace the parent PCI bus, then the upstream bridge.
	 */
	if (eeh_probe_mode_dev())
		pe->bus = eeh_dev_to_pci_dev(edev)->bus;

	/*
	 * Put the new EEH PE into the hierarchy tree. If the parent
	 * can't be found, the newly created PE will be attached
	 * to the PHB directly. Otherwise, we have to associate the
	 * PE with its parent.
	 */
	parent = eeh_pe_get_parent(edev);
	if (!parent) {
		parent = eeh_phb_pe_get(edev->phb);
		if (!parent) {
			pr_err("%s: No PHB PE is found (PHB Domain=%d)\n",
			       __func__, edev->phb->global_number);
			edev->pe = NULL;
			kfree(pe);
			return -EEXIST;
		}
	}
	pe->parent = parent;

	/*
	 * Put the newly created PE into the child list and
	 * link the EEH device accordingly.
	 */
	list_add_tail(&pe->child, &parent->child_list);
	list_add_tail(&edev->list, &pe->edevs);
	edev->pe = pe;
	pr_debug("EEH: Add %s to Device PE#%x, Parent PE#%x\n",
		 edev->dn->full_name, pe->addr, pe->parent->addr);

	return 0;
}

/**
 * eeh_rmv_from_parent_pe - Remove one EEH device from the associated PE
 * @edev: EEH device
 *
 * The PE hierarchy tree might be changed when doing PCI hotplug.
 * Also, the PCI devices or buses could be removed from the system
 * during EEH recovery. So we have to call this function to remove
 * the corresponding PE accordingly if necessary.
 */
int eeh_rmv_from_parent_pe(struct eeh_dev *edev)
{
	struct eeh_pe *pe, *parent, *child;
	int cnt;

	if (!edev->pe) {
		pr_debug("%s: No PE found for EEH device %s\n",
			 __func__, edev->dn->full_name);
		return -EEXIST;
	}

	/* Remove the EEH device */
	pe = edev->pe;
	edev->pe = NULL;
	list_del(&edev->list);

	/*
	 * Check if the parent PE includes any EEH devices.
	 * If not, we should delete it. Also, we should delete
	 * the parent PE if it has neither associated child PEs
	 * nor EEH devices.
	 */
	while (1) {
		parent = pe->parent;
		if (pe->type & EEH_PE_PHB)
			break;

		if (!(pe->state & EEH_PE_KEEP)) {
			if (list_empty(&pe->edevs) &&
			    list_empty(&pe->child_list)) {
				list_del(&pe->child);
				kfree(pe);
			} else {
				break;
			}
		} else {
			if (list_empty(&pe->edevs)) {
				cnt = 0;
				list_for_each_entry(child, &pe->child_list, child) {
					if (!(child->type & EEH_PE_INVALID)) {
						cnt++;
						break;
					}
				}

				if (!cnt)
					pe->type |= EEH_PE_INVALID;
				else
					break;
			}
		}

		pe = parent;
	}

	return 0;
}

/**
 * eeh_pe_update_time_stamp - Update PE's frozen time stamp
 * @pe: EEH PE
 *
 * We keep a time stamp for each PE so that we only track the
 * errors it has seen within the last hour. The function should be
 * called on the first error of the specific PE to update the time
 * stamp; errors from more than an hour ago are not accounted for.
 */
void eeh_pe_update_time_stamp(struct eeh_pe *pe)
{
	struct timeval tstamp;

	if (!pe)
		return;

	if (pe->freeze_count <= 0) {
		pe->freeze_count = 0;
		do_gettimeofday(&pe->tstamp);
	} else {
		do_gettimeofday(&tstamp);
		if (tstamp.tv_sec - pe->tstamp.tv_sec > 3600) {
			pe->tstamp = tstamp;
			pe->freeze_count = 0;
		}
	}
}

/**
 * __eeh_pe_state_mark - Mark the state for the PE
 * @data: EEH PE
 * @flag: state
 *
 * The function is used to mark the indicated state for the given
 * PE. The associated PCI devices will be put into the I/O frozen
 * state as well.
 */
static void *__eeh_pe_state_mark(void *data, void *flag)
{
	struct eeh_pe *pe = (struct eeh_pe *)data;
	int state = *((int *)flag);
	struct eeh_dev *edev, *tmp;
	struct pci_dev *pdev;

	/*
	 * Mark the PE with the indicated state. The associated
	 * PCI devices will also be put into the I/O frozen state
	 * to avoid I/O accesses from the PCI device drivers.
	 */
	pe->state |= state;
	eeh_pe_for_each_dev(pe, edev, tmp) {
		pdev = eeh_dev_to_pci_dev(edev);
		if (pdev)
			pdev->error_state = pci_channel_io_frozen;
	}

	return NULL;
}

/**
 * eeh_pe_state_mark - Mark specified state for PE and its associated devices
 * @pe: EEH PE
 * @state: state to be marked
 *
 * An EEH error affects the current PE and its child PEs. The function
 * is used to mark the appropriate state for the affected PEs and
 * their associated devices.
 */
void eeh_pe_state_mark(struct eeh_pe *pe, int state)
{
	eeh_pe_traverse(pe, __eeh_pe_state_mark, &state);
}

/**
 * __eeh_pe_state_clear - Clear state for the PE
 * @data: EEH PE
 * @flag: state
 *
 * The function is used to clear the indicated state from the
 * given PE. The check count of the PE is cleared as well.
 */
static void *__eeh_pe_state_clear(void *data, void *flag)
{
	struct eeh_pe *pe = (struct eeh_pe *)data;
	int state = *((int *)flag);

	pe->state &= ~state;
	pe->check_count = 0;

	return NULL;
}

/**
 * eeh_pe_state_clear - Clear state for the PE and its children
 * @pe: PE
 * @state: state to be cleared
 *
 * When the PE and its children have been recovered from errors,
 * we need to clear the corresponding error state. The function
 * serves that purpose.
 */
void eeh_pe_state_clear(struct eeh_pe *pe, int state)
{
	eeh_pe_traverse(pe, __eeh_pe_state_clear, &state);
}

/*
 * Some PCI bridges (e.g. PLX bridges) have primary/secondary
 * buses assigned explicitly by firmware, and we may have lost
 * that after a reset. So we have to delay the check until the
 * PCI-CFG registers have been restored for the parent bridge.
 *
 * Don't use the normal PCI-CFG accessors, which have probably
 * been blocked on the normal path during this stage; instead,
 * use the EEH operations, which are always permitted.
 */
static void eeh_bridge_check_link(struct eeh_dev *edev,
				  struct device_node *dn)
{
	int cap;
	uint32_t val;
	int timeout = 0;

	/*
	 * We only check root ports and downstream ports of
	 * PCIe switches.
	 */
	if (!(edev->mode & (EEH_DEV_ROOT_PORT | EEH_DEV_DS_PORT)))
		return;

	pr_debug("%s: Check PCIe link for %04x:%02x:%02x.%01x ...\n",
		 __func__, edev->phb->global_number,
		 edev->config_addr >> 8,
		 PCI_SLOT(edev->config_addr & 0xFF),
		 PCI_FUNC(edev->config_addr & 0xFF));

	/* Check slot status */
	cap = edev->pcie_cap;
	eeh_ops->read_config(dn, cap + PCI_EXP_SLTSTA, 2, &val);
	if (!(val & PCI_EXP_SLTSTA_PDS)) {
		pr_debug(" No card in the slot (0x%04x) !\n", val);
		return;
	}

	/* Check power status if we have the capability */
	eeh_ops->read_config(dn, cap + PCI_EXP_SLTCAP, 2, &val);
	if (val & PCI_EXP_SLTCAP_PCP) {
		eeh_ops->read_config(dn, cap + PCI_EXP_SLTCTL, 2, &val);
		if (val & PCI_EXP_SLTCTL_PCC) {
			pr_debug(" In power-off state, power it on ...\n");
			val &= ~(PCI_EXP_SLTCTL_PCC | PCI_EXP_SLTCTL_PIC);
			val |= (0x0100 & PCI_EXP_SLTCTL_PIC);
			eeh_ops->write_config(dn, cap + PCI_EXP_SLTCTL, 2, val);
			msleep(2 * 1000);
		}
	}

	/* Enable link */
	eeh_ops->read_config(dn, cap + PCI_EXP_LNKCTL, 2, &val);
	val &= ~PCI_EXP_LNKCTL_LD;
	eeh_ops->write_config(dn, cap + PCI_EXP_LNKCTL, 2, val);

	/* Check link */
	eeh_ops->read_config(dn, cap + PCI_EXP_LNKCAP, 4, &val);
	if (!(val & PCI_EXP_LNKCAP_DLLLARC)) {
		pr_debug(" No link reporting capability (0x%08x)\n", val);
		msleep(1000);
		return;
	}

	/* Wait for the link to come up, or time out after 5s */
	timeout = 0;
	while (timeout < 5000) {
		msleep(20);
		timeout += 20;

		eeh_ops->read_config(dn, cap + PCI_EXP_LNKSTA, 2, &val);
		if (val & PCI_EXP_LNKSTA_DLLLA)
			break;
	}

	if (val & PCI_EXP_LNKSTA_DLLLA)
		pr_debug(" Link up (%s)\n",
			 (val & PCI_EXP_LNKSTA_CLS_2_5GB) ?
"2.5GB" : "5GB"); 648 else 649 pr_debug(" Link not ready (0x%04x)\n", val); 650 } 651 652 #define BYTE_SWAP(OFF) (8*((OFF)/4)+3-(OFF)) 653 #define SAVED_BYTE(OFF) (((u8 *)(edev->config_space))[BYTE_SWAP(OFF)]) 654 655 static void eeh_restore_bridge_bars(struct eeh_dev *edev, 656 struct device_node *dn) 657 { 658 int i; 659 660 /* 661 * Device BARs: 0x10 - 0x18 662 * Bus numbers and windows: 0x18 - 0x30 663 */ 664 for (i = 4; i < 13; i++) 665 eeh_ops->write_config(dn, i*4, 4, edev->config_space[i]); 666 /* Rom: 0x38 */ 667 eeh_ops->write_config(dn, 14*4, 4, edev->config_space[14]); 668 669 /* Cache line & Latency timer: 0xC 0xD */ 670 eeh_ops->write_config(dn, PCI_CACHE_LINE_SIZE, 1, 671 SAVED_BYTE(PCI_CACHE_LINE_SIZE)); 672 eeh_ops->write_config(dn, PCI_LATENCY_TIMER, 1, 673 SAVED_BYTE(PCI_LATENCY_TIMER)); 674 /* Max latency, min grant, interrupt ping and line: 0x3C */ 675 eeh_ops->write_config(dn, 15*4, 4, edev->config_space[15]); 676 677 /* PCI Command: 0x4 */ 678 eeh_ops->write_config(dn, PCI_COMMAND, 4, edev->config_space[1]); 679 680 /* Check the PCIe link is ready */ 681 eeh_bridge_check_link(edev, dn); 682 } 683 684 static void eeh_restore_device_bars(struct eeh_dev *edev, 685 struct device_node *dn) 686 { 687 int i; 688 u32 cmd; 689 690 for (i = 4; i < 10; i++) 691 eeh_ops->write_config(dn, i*4, 4, edev->config_space[i]); 692 /* 12 == Expansion ROM Address */ 693 eeh_ops->write_config(dn, 12*4, 4, edev->config_space[12]); 694 695 eeh_ops->write_config(dn, PCI_CACHE_LINE_SIZE, 1, 696 SAVED_BYTE(PCI_CACHE_LINE_SIZE)); 697 eeh_ops->write_config(dn, PCI_LATENCY_TIMER, 1, 698 SAVED_BYTE(PCI_LATENCY_TIMER)); 699 700 /* max latency, min grant, interrupt pin and line */ 701 eeh_ops->write_config(dn, 15*4, 4, edev->config_space[15]); 702 703 /* 704 * Restore PERR & SERR bits, some devices require it, 705 * don't touch the other command bits 706 */ 707 eeh_ops->read_config(dn, PCI_COMMAND, 4, &cmd); 708 if (edev->config_space[1] & PCI_COMMAND_PARITY) 709 cmd |= PCI_COMMAND_PARITY; 710 else 711 cmd &= ~PCI_COMMAND_PARITY; 712 if (edev->config_space[1] & PCI_COMMAND_SERR) 713 cmd |= PCI_COMMAND_SERR; 714 else 715 cmd &= ~PCI_COMMAND_SERR; 716 eeh_ops->write_config(dn, PCI_COMMAND, 4, cmd); 717 } 718 719 /** 720 * eeh_restore_one_device_bars - Restore the Base Address Registers for one device 721 * @data: EEH device 722 * @flag: Unused 723 * 724 * Loads the PCI configuration space base address registers, 725 * the expansion ROM base address, the latency timer, and etc. 726 * from the saved values in the device node. 727 */ 728 static void *eeh_restore_one_device_bars(void *data, void *flag) 729 { 730 struct eeh_dev *edev = (struct eeh_dev *)data; 731 struct device_node *dn = eeh_dev_to_of_node(edev); 732 733 /* Do special restore for bridges */ 734 if (edev->mode & EEH_DEV_BRIDGE) 735 eeh_restore_bridge_bars(edev, dn); 736 else 737 eeh_restore_device_bars(edev, dn); 738 739 if (eeh_ops->restore_config) 740 eeh_ops->restore_config(dn); 741 742 return NULL; 743 } 744 745 /** 746 * eeh_pe_restore_bars - Restore the PCI config space info 747 * @pe: EEH PE 748 * 749 * This routine performs a recursive walk to the children 750 * of this device as well. 751 */ 752 void eeh_pe_restore_bars(struct eeh_pe *pe) 753 { 754 /* 755 * We needn't take the EEH lock since eeh_pe_dev_traverse() 756 * will take that. 
	 */
	eeh_pe_dev_traverse(pe, eeh_restore_one_device_bars, NULL);
}

/**
 * eeh_pe_bus_get - Retrieve PCI bus according to the given PE
 * @pe: EEH PE
 *
 * Retrieve the PCI bus according to the given PE. Basically,
 * there are 3 types of PEs: PHB/Bus/Device. For a PHB PE, the
 * PHB's primary PCI bus is returned. For a Bus PE, the bus
 * shared by its devices is returned. A Device PE has no PCI bus
 * of its own, so the bus of its first EEH device is used instead.
 */
struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe)
{
	struct pci_bus *bus = NULL;
	struct eeh_dev *edev;
	struct pci_dev *pdev;

	if (pe->type & EEH_PE_PHB) {
		bus = pe->phb->bus;
	} else if (pe->type & EEH_PE_BUS ||
		   pe->type & EEH_PE_DEVICE) {
		if (pe->bus) {
			bus = pe->bus;
			goto out;
		}

		edev = list_first_entry(&pe->edevs, struct eeh_dev, list);
		pdev = eeh_dev_to_pci_dev(edev);
		if (pdev)
			bus = pdev->bus;
	}

out:
	return bus;
}