/*
 * This file implements the platform-dependent EEH operations on pseries.
 * Since the pseries platform is built heavily on top of RTAS, these EEH
 * operations boil down to RTAS calls. The functions are derived from
 * arch/powerpc/platforms/pseries/eeh.c, with the necessary cleanup applied.
 *
 * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2011.
 * Copyright IBM Corporation 2001, 2005, 2006
 * Copyright Dave Engebretsen & Todd Inglett 2001
 * Copyright Linas Vepstas 2005, 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>

#include <asm/eeh.h>
#include <asm/eeh_event.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include <asm/rtas.h>

/* RTAS tokens */
static int ibm_set_eeh_option;
static int ibm_set_slot_reset;
static int ibm_read_slot_reset_state;
static int ibm_read_slot_reset_state2;
static int ibm_slot_error_detail;
static int ibm_get_config_addr_info;
static int ibm_get_config_addr_info2;
static int ibm_configure_bridge;
static int ibm_configure_pe;

/*
 * Buffer for reporting slot-error-detail RTAS calls. It's kept here
 * in BSS, not dynamically allocated, so that it ends up in the RMO
 * where RTAS can access it.
 */
static unsigned char slot_errbuf[RTAS_ERROR_LOG_MAX];
static DEFINE_SPINLOCK(slot_errbuf_lock);
static int eeh_error_buf_size;
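/*
 * A rough sketch of the calling convention shared by the EEH RTAS calls
 * in this file (see the rtas_call() sites below): the device or PE is
 * identified by a config address plus the 64-bit PHB BUID, split into
 * its high and low halves, followed by any call-specific arguments:
 *
 *	rtas_call(token, nargs, nret, rets,
 *		  config_addr, BUID_HI(pe->phb->buid),
 *		  BUID_LO(pe->phb->buid), ...);
 */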
/**
 * pseries_eeh_init - EEH platform dependent initialization
 *
 * EEH platform dependent initialization on pseries.
 */
static int pseries_eeh_init(void)
{
	/* figure out EEH RTAS function call tokens */
	ibm_set_eeh_option		= rtas_token("ibm,set-eeh-option");
	ibm_set_slot_reset		= rtas_token("ibm,set-slot-reset");
	ibm_read_slot_reset_state2	= rtas_token("ibm,read-slot-reset-state2");
	ibm_read_slot_reset_state	= rtas_token("ibm,read-slot-reset-state");
	ibm_slot_error_detail		= rtas_token("ibm,slot-error-detail");
	ibm_get_config_addr_info2	= rtas_token("ibm,get-config-addr-info2");
	ibm_get_config_addr_info	= rtas_token("ibm,get-config-addr-info");
	ibm_configure_pe		= rtas_token("ibm,configure-pe");
	ibm_configure_bridge		= rtas_token("ibm,configure-bridge");

	/*
	 * Necessary sanity check. We needn't check "get-config-addr-info"
	 * and its variant since old firmware probably supports the
	 * domain/bus/slot/function address form for the EEH RTAS operations.
	 */
	if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE) {
		pr_warning("%s: RTAS service <ibm,set-eeh-option> invalid\n",
			__func__);
		return -EINVAL;
	} else if (ibm_set_slot_reset == RTAS_UNKNOWN_SERVICE) {
		pr_warning("%s: RTAS service <ibm,set-slot-reset> invalid\n",
			__func__);
		return -EINVAL;
	} else if (ibm_read_slot_reset_state2 == RTAS_UNKNOWN_SERVICE &&
		   ibm_read_slot_reset_state == RTAS_UNKNOWN_SERVICE) {
		pr_warning("%s: RTAS service <ibm,read-slot-reset-state2> and "
			"<ibm,read-slot-reset-state> invalid\n",
			__func__);
		return -EINVAL;
	} else if (ibm_slot_error_detail == RTAS_UNKNOWN_SERVICE) {
		pr_warning("%s: RTAS service <ibm,slot-error-detail> invalid\n",
			__func__);
		return -EINVAL;
	} else if (ibm_configure_pe == RTAS_UNKNOWN_SERVICE &&
		   ibm_configure_bridge == RTAS_UNKNOWN_SERVICE) {
		pr_warning("%s: RTAS service <ibm,configure-pe> and "
			"<ibm,configure-bridge> invalid\n",
			__func__);
		return -EINVAL;
	}

	/* Initialize error log lock and size */
	spin_lock_init(&slot_errbuf_lock);
	eeh_error_buf_size = rtas_token("rtas-error-log-max");
	if (eeh_error_buf_size == RTAS_UNKNOWN_SERVICE) {
		pr_warning("%s: unknown EEH error log size\n",
			__func__);
		eeh_error_buf_size = 1024;
	} else if (eeh_error_buf_size > RTAS_ERROR_LOG_MAX) {
		pr_warning("%s: EEH error log size %d exceeds the maximal %d\n",
			__func__, eeh_error_buf_size, RTAS_ERROR_LOG_MAX);
		eeh_error_buf_size = RTAS_ERROR_LOG_MAX;
	}

	/* Set EEH probe mode */
	eeh_probe_mode_set(EEH_PROBE_MODE_DEVTREE);

	return 0;
}

static int pseries_eeh_cap_start(struct device_node *dn)
{
	struct pci_dn *pdn = PCI_DN(dn);
	u32 status;

	if (!pdn)
		return 0;

	rtas_read_config(pdn, PCI_STATUS, 2, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	return PCI_CAPABILITY_LIST;
}

static int pseries_eeh_find_cap(struct device_node *dn, int cap)
{
	struct pci_dn *pdn = PCI_DN(dn);
	int pos = pseries_eeh_cap_start(dn);
	int cnt = 48;	/* Maximal number of capabilities */
	u32 id;

	if (!pos)
		return 0;

	while (cnt--) {
		rtas_read_config(pdn, pos, 1, &pos);
		if (pos < 0x40)
			break;
		pos &= ~3;
		rtas_read_config(pdn, pos + PCI_CAP_LIST_ID, 1, &id);
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos += PCI_CAP_LIST_NEXT;
	}

	return 0;
}
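/*
 * A minimal sketch of the config address used by the probe below,
 * assuming the standard Open Firmware PCI "reg" encoding: the first
 * ("phys.hi") cell is taken verbatim as the device's config address,
 * which amounts to roughly
 *
 *	config_addr = (bus << 16) | (device << 11) | (function << 8);
 *
 * under the relevant PHB, which is identified separately by its BUID.
 */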
/**
 * pseries_eeh_of_probe - EEH probe on the given device
 * @dn: OF node
 * @flag: Unused
 *
 * When the EEH module is installed during system boot, all PCI devices
 * are checked one by one to see if they support EEH. This function is
 * introduced for that purpose.
 */
static void *pseries_eeh_of_probe(struct device_node *dn, void *flag)
{
	struct eeh_dev *edev;
	struct eeh_pe pe;
	struct pci_dn *pdn = PCI_DN(dn);
	const __be32 *classp, *vendorp, *devicep;
	u32 class_code;
	const __be32 *regs;
	u32 pcie_flags;
	int enable = 0;
	int ret;

	/* Retrieve OF node and eeh device */
	edev = of_node_to_eeh_dev(dn);
	if (edev->pe || !of_device_is_available(dn))
		return NULL;

	/* Retrieve class/vendor/device IDs */
	classp = of_get_property(dn, "class-code", NULL);
	vendorp = of_get_property(dn, "vendor-id", NULL);
	devicep = of_get_property(dn, "device-id", NULL);

	/* Skip for bad OF node or PCI-ISA bridge */
	if (!classp || !vendorp || !devicep)
		return NULL;
	if (dn->type && !strcmp(dn->type, "isa"))
		return NULL;

	class_code = of_read_number(classp, 1);

	/*
	 * Update the class code and mode of the eeh device. We need
	 * to correctly reflect whether the current device is a root
	 * port or a PCIe switch downstream port.
	 */
	edev->class_code = class_code;
	edev->pcie_cap = pseries_eeh_find_cap(dn, PCI_CAP_ID_EXP);
	edev->mode &= 0xFFFFFF00;
	if ((edev->class_code >> 8) == PCI_CLASS_BRIDGE_PCI) {
		edev->mode |= EEH_DEV_BRIDGE;
		if (edev->pcie_cap) {
			rtas_read_config(pdn, edev->pcie_cap + PCI_EXP_FLAGS,
					 2, &pcie_flags);
			pcie_flags = (pcie_flags & PCI_EXP_FLAGS_TYPE) >> 4;
			if (pcie_flags == PCI_EXP_TYPE_ROOT_PORT)
				edev->mode |= EEH_DEV_ROOT_PORT;
			else if (pcie_flags == PCI_EXP_TYPE_DOWNSTREAM)
				edev->mode |= EEH_DEV_DS_PORT;
		}
	}

	/* Retrieve the device address */
	regs = of_get_property(dn, "reg", NULL);
	if (!regs) {
		pr_warning("%s: OF node property %s::reg not found\n",
			__func__, dn->full_name);
		return NULL;
	}

	/* Initialize the fake PE */
	memset(&pe, 0, sizeof(struct eeh_pe));
	pe.phb = edev->phb;
	pe.config_addr = of_read_number(regs, 1);

	/* Enable EEH on the device */
	ret = eeh_ops->set_option(&pe, EEH_OPT_ENABLE);
	if (!ret) {
		edev->config_addr = of_read_number(regs, 1);
		/* Retrieve PE address */
		edev->pe_config_addr = eeh_ops->get_pe_addr(&pe);
		pe.addr = edev->pe_config_addr;

		/* Some older systems (Power4) allow the ibm,set-eeh-option
		 * call to succeed even on nodes where EEH is not supported.
		 * Verify support explicitly.
		 */
		ret = eeh_ops->get_state(&pe, NULL);
		if (ret > 0 && ret != EEH_STATE_NOT_SUPPORT)
			enable = 1;

		if (enable) {
			eeh_set_enable(true);
			eeh_add_to_parent_pe(edev);

			pr_debug("%s: EEH enabled on %s PHB#%d-PE#%x, config addr#%x\n",
				__func__, dn->full_name, pe.phb->global_number,
				pe.addr, pe.config_addr);
		} else if (dn->parent && of_node_to_eeh_dev(dn->parent) &&
			   (of_node_to_eeh_dev(dn->parent))->pe) {
			/* This device doesn't support EEH, but it may have an
			 * EEH parent, in which case we mark it as supported.
			 */
			edev->config_addr = of_node_to_eeh_dev(dn->parent)->config_addr;
			edev->pe_config_addr = of_node_to_eeh_dev(dn->parent)->pe_config_addr;
			eeh_add_to_parent_pe(edev);
		}
	}

	/* Save memory bars */
	eeh_save_bars(edev);

	return NULL;
}
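/*
 * A short note on addressing, as applied throughout the operations below:
 * whenever a PE address has been assigned (pe->addr), it is preferred over
 * the raw device config address, along the lines of
 *
 *	config_addr = pe->config_addr;
 *	if (pe->addr)
 *		config_addr = pe->addr;
 *
 * so that each RTAS call targets the PE rather than an individual
 * function whenever possible.
 */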
/**
 * pseries_eeh_set_option - Initialize EEH or MMIO/DMA reenable
 * @pe: EEH PE
 * @option: operation to be issued
 *
 * The function is used to control the EEH functionality globally.
 * Currently, the following options are supported according to PAPR:
 * Enable EEH, Disable EEH, Enable MMIO and Enable DMA
 */
static int pseries_eeh_set_option(struct eeh_pe *pe, int option)
{
	int ret = 0;
	int config_addr;

	/*
	 * When we're enabling or disabling EEH functionality on
	 * the particular PE, the PE config address is possibly
	 * unavailable. Therefore, we have to figure it out from
	 * the FDT node.
	 */
	switch (option) {
	case EEH_OPT_DISABLE:
	case EEH_OPT_ENABLE:
	case EEH_OPT_THAW_MMIO:
	case EEH_OPT_THAW_DMA:
		config_addr = pe->config_addr;
		if (pe->addr)
			config_addr = pe->addr;
		break;

	default:
		pr_err("%s: Invalid option %d\n",
			__func__, option);
		return -EINVAL;
	}

	ret = rtas_call(ibm_set_eeh_option, 4, 1, NULL,
			config_addr, BUID_HI(pe->phb->buid),
			BUID_LO(pe->phb->buid), option);

	return ret;
}

/**
 * pseries_eeh_get_pe_addr - Retrieve PE address
 * @pe: EEH PE
 *
 * Retrieve the associated PE address. There are actually two RTAS
 * calls dedicated to the purpose: we try the new one first and fall
 * back to the old one. Besides, the caller should make sure the config
 * address has been figured out from the FDT node before calling the
 * function.
 *
 * Note that a zeroed return value means an invalid PE config address.
 */
static int pseries_eeh_get_pe_addr(struct eeh_pe *pe)
{
	int ret = 0;
	int rets[3];

	if (ibm_get_config_addr_info2 != RTAS_UNKNOWN_SERVICE) {
		/*
		 * First of all, we need to make sure there is one PE
		 * associated with the device. Otherwise, the PE address
		 * is meaningless.
		 */
		ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets,
				pe->config_addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid), 1);
		if (ret || (rets[0] == 0))
			return 0;

		/* Retrieve the associated PE config address */
		ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets,
				pe->config_addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid), 0);
		if (ret) {
			pr_warning("%s: Failed to get address for PHB#%d-PE#%x\n",
				__func__, pe->phb->global_number, pe->config_addr);
			return 0;
		}

		return rets[0];
	}

	if (ibm_get_config_addr_info != RTAS_UNKNOWN_SERVICE) {
		ret = rtas_call(ibm_get_config_addr_info, 4, 2, rets,
				pe->config_addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid), 0);
		if (ret) {
			pr_warning("%s: Failed to get address for PHB#%d-PE#%x\n",
				__func__, pe->phb->global_number, pe->config_addr);
			return 0;
		}

		return rets[0];
	}

	return ret;
}
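/*
 * A short sketch of how the ibm,read-slot-reset-state{,2} outputs are
 * consumed by pseries_eeh_get_state() below (derived from the code
 * rather than restating PAPR):
 *
 *	rets[0]: PE state, 0 through 5, mapped onto EEH_STATE_* flags
 *	rets[1]: non-zero when EEH is supported on this PE
 *	rets[2]: suggested wait time, in milliseconds, when rets[0] == 5
 *		 (temporarily unavailable); faked to 0 for the old call
 */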
/**
 * pseries_eeh_get_state - Retrieve PE state
 * @pe: EEH PE
 * @state: return value
 *
 * Retrieve the state of the specified PE. On an RTAS-compliant
 * pseries platform, there is a dedicated RTAS function for the
 * purpose. Note that the associated PE config address might already
 * be available when calling the function, so endeavour to use the
 * PE config address if possible. Furthermore, there are two RTAS
 * calls for the purpose: we try the new one and fall back to the
 * old one if the new one isn't supported.
 */
static int pseries_eeh_get_state(struct eeh_pe *pe, int *state)
{
	int config_addr;
	int ret;
	int rets[4];
	int result;

	/* Figure out PE config address if possible */
	config_addr = pe->config_addr;
	if (pe->addr)
		config_addr = pe->addr;

	if (ibm_read_slot_reset_state2 != RTAS_UNKNOWN_SERVICE) {
		ret = rtas_call(ibm_read_slot_reset_state2, 3, 4, rets,
				config_addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid));
	} else if (ibm_read_slot_reset_state != RTAS_UNKNOWN_SERVICE) {
		/* Fake PE unavailable info */
		rets[2] = 0;
		ret = rtas_call(ibm_read_slot_reset_state, 3, 3, rets,
				config_addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid));
	} else {
		return EEH_STATE_NOT_SUPPORT;
	}

	if (ret)
		return ret;

	/* Parse the result out */
	result = 0;
	if (rets[1]) {
		switch(rets[0]) {
		case 0:
			result &= ~EEH_STATE_RESET_ACTIVE;
			result |= EEH_STATE_MMIO_ACTIVE;
			result |= EEH_STATE_DMA_ACTIVE;
			break;
		case 1:
			result |= EEH_STATE_RESET_ACTIVE;
			result |= EEH_STATE_MMIO_ACTIVE;
			result |= EEH_STATE_DMA_ACTIVE;
			break;
		case 2:
			result &= ~EEH_STATE_RESET_ACTIVE;
			result &= ~EEH_STATE_MMIO_ACTIVE;
			result &= ~EEH_STATE_DMA_ACTIVE;
			break;
		case 4:
			result &= ~EEH_STATE_RESET_ACTIVE;
			result &= ~EEH_STATE_MMIO_ACTIVE;
			result &= ~EEH_STATE_DMA_ACTIVE;
			result |= EEH_STATE_MMIO_ENABLED;
			break;
		case 5:
			if (rets[2]) {
				if (state) *state = rets[2];
				result = EEH_STATE_UNAVAILABLE;
			} else {
				result = EEH_STATE_NOT_SUPPORT;
			}
			break;
		default:
			result = EEH_STATE_NOT_SUPPORT;
		}
	} else {
		result = EEH_STATE_NOT_SUPPORT;
	}

	return result;
}

/**
 * pseries_eeh_reset - Reset the specified PE
 * @pe: EEH PE
 * @option: reset option
 *
 * Reset the specified PE
 */
static int pseries_eeh_reset(struct eeh_pe *pe, int option)
{
	int config_addr;
	int ret;

	/* Figure out PE address */
	config_addr = pe->config_addr;
	if (pe->addr)
		config_addr = pe->addr;

	/* Reset PE through RTAS call */
	ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
			config_addr, BUID_HI(pe->phb->buid),
			BUID_LO(pe->phb->buid), option);

	/* If fundamental-reset not supported, try hot-reset */
	if (option == EEH_RESET_FUNDAMENTAL &&
	    ret == -8) {
		ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
				config_addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid), EEH_RESET_HOT);
	}

	return ret;
}
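/*
 * A brief usage sketch, assuming the generic EEH core drives the reset
 * sequence: the reset is asserted with EEH_RESET_HOT (or
 * EEH_RESET_FUNDAMENTAL), held for a settle time chosen by the caller,
 * and then released with EEH_RESET_DEACTIVATE, roughly:
 *
 *	pseries_eeh_reset(pe, EEH_RESET_HOT);
 *	msleep(hold_time);
 *	pseries_eeh_reset(pe, EEH_RESET_DEACTIVATE);
 */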
/**
 * pseries_eeh_wait_state - Wait for PE state
 * @pe: EEH PE
 * @max_wait: maximal period in milliseconds
 *
 * Wait for the state of the associated PE. It might take some time
 * to retrieve the PE's state.
 */
static int pseries_eeh_wait_state(struct eeh_pe *pe, int max_wait)
{
	int ret;
	int mwait;

	/*
	 * According to PAPR, the state of a PE might be temporarily
	 * unavailable. Under that circumstance, we have to wait for
	 * the time indicated by the firmware. The maximal wait time
	 * is 5 minutes, taken from the original EEH implementation,
	 * which also defined the minimal wait time as 1 second.
	 */
#define EEH_STATE_MIN_WAIT_TIME	(1000)
#define EEH_STATE_MAX_WAIT_TIME	(300 * 1000)

	while (1) {
		ret = pseries_eeh_get_state(pe, &mwait);

		/*
		 * If the PE's state is temporarily unavailable,
		 * we have to wait for the specified time. Otherwise,
		 * the PE's state will be returned immediately.
		 */
		if (ret != EEH_STATE_UNAVAILABLE)
			return ret;

		if (max_wait <= 0) {
			pr_warning("%s: Timeout when getting PE's state (%d)\n",
				__func__, max_wait);
			return EEH_STATE_NOT_SUPPORT;
		}

		if (mwait <= 0) {
			pr_warning("%s: Firmware returned bad wait value %d\n",
				__func__, mwait);
			mwait = EEH_STATE_MIN_WAIT_TIME;
		} else if (mwait > EEH_STATE_MAX_WAIT_TIME) {
			pr_warning("%s: Firmware returned too long wait value %d\n",
				__func__, mwait);
			mwait = EEH_STATE_MAX_WAIT_TIME;
		}

		max_wait -= mwait;
		msleep(mwait);
	}

	return EEH_STATE_NOT_SUPPORT;
}

/**
 * pseries_eeh_get_log - Retrieve error log
 * @pe: EEH PE
 * @severity: temporary or permanent error log
 * @drv_log: driver log to be combined with retrieved error log
 * @len: length of driver log
 *
 * Retrieve the temporary or permanent error log from the PE.
 * The error is actually retrieved through the dedicated RTAS call.
 */
static int pseries_eeh_get_log(struct eeh_pe *pe, int severity, char *drv_log, unsigned long len)
{
	int config_addr;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&slot_errbuf_lock, flags);
	memset(slot_errbuf, 0, eeh_error_buf_size);

	/* Figure out the PE address */
	config_addr = pe->config_addr;
	if (pe->addr)
		config_addr = pe->addr;

	ret = rtas_call(ibm_slot_error_detail, 8, 1, NULL, config_addr,
			BUID_HI(pe->phb->buid), BUID_LO(pe->phb->buid),
			virt_to_phys(drv_log), len,
			virt_to_phys(slot_errbuf), eeh_error_buf_size,
			severity);
	if (!ret)
		log_error(slot_errbuf, ERR_TYPE_RTAS_LOG, 0);
	spin_unlock_irqrestore(&slot_errbuf_lock, flags);

	return ret;
}
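/*
 * A brief usage sketch, assuming the generic EEH recovery flow drives
 * these operations: once the PE reset has been released, the bridges
 * inside the PE are reconfigured before config space and MMIO accesses
 * to the downstream devices are resumed, roughly:
 *
 *	pseries_eeh_reset(pe, EEH_RESET_DEACTIVATE);
 *	pseries_eeh_wait_state(pe, max_wait);	(max_wait in milliseconds)
 *	pseries_eeh_configure_bridge(pe);
 */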
/**
 * pseries_eeh_configure_bridge - Configure PCI bridges in the indicated PE
 * @pe: EEH PE
 *
 * The function will be called to reconfigure the bridges included
 * in the specified PE so that the malfunctioning PE can be recovered.
 */
static int pseries_eeh_configure_bridge(struct eeh_pe *pe)
{
	int config_addr;
	int ret;

	/* Figure out the PE address */
	config_addr = pe->config_addr;
	if (pe->addr)
		config_addr = pe->addr;

	/* Use new configure-pe function, if supported */
	if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) {
		ret = rtas_call(ibm_configure_pe, 3, 1, NULL,
				config_addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid));
	} else if (ibm_configure_bridge != RTAS_UNKNOWN_SERVICE) {
		ret = rtas_call(ibm_configure_bridge, 3, 1, NULL,
				config_addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid));
	} else {
		return -EFAULT;
	}

	if (ret)
		pr_warning("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n",
			__func__, pe->phb->global_number, pe->addr, ret);

	return ret;
}

/**
 * pseries_eeh_read_config - Read PCI config space
 * @dn: device node
 * @where: PCI address
 * @size: size to read
 * @val: return value
 *
 * Read config space from the specified device
 */
static int pseries_eeh_read_config(struct device_node *dn, int where, int size, u32 *val)
{
	struct pci_dn *pdn;

	pdn = PCI_DN(dn);

	return rtas_read_config(pdn, where, size, val);
}

/**
 * pseries_eeh_write_config - Write PCI config space
 * @dn: device node
 * @where: PCI address
 * @size: size to write
 * @val: value to be written
 *
 * Write config space to the specified device
 */
static int pseries_eeh_write_config(struct device_node *dn, int where, int size, u32 val)
{
	struct pci_dn *pdn;

	pdn = PCI_DN(dn);

	return rtas_write_config(pdn, where, size, val);
}

static struct eeh_ops pseries_eeh_ops = {
	.name			= "pseries",
	.init			= pseries_eeh_init,
	.of_probe		= pseries_eeh_of_probe,
	.dev_probe		= NULL,
	.set_option		= pseries_eeh_set_option,
	.get_pe_addr		= pseries_eeh_get_pe_addr,
	.get_state		= pseries_eeh_get_state,
	.reset			= pseries_eeh_reset,
	.wait_state		= pseries_eeh_wait_state,
	.get_log		= pseries_eeh_get_log,
	.configure_bridge	= pseries_eeh_configure_bridge,
	.read_config		= pseries_eeh_read_config,
	.write_config		= pseries_eeh_write_config,
	.next_error		= NULL,
	.restore_config		= NULL
};

/**
 * eeh_pseries_init - Register platform dependent EEH operations
 *
 * EEH initialization on the pseries platform. This function should be
 * called before any other EEH related functions.
 */
static int __init eeh_pseries_init(void)
{
	int ret = -EINVAL;

	if (!machine_is(pseries))
		return ret;

	ret = eeh_ops_register(&pseries_eeh_ops);
	if (!ret)
		pr_info("EEH: pSeries platform initialized\n");
	else
		pr_info("EEH: pSeries platform initialization failure (%d)\n",
			ret);

	return ret;
}

early_initcall(eeh_pseries_init);