/*
 * This file implements the platform-dependent EEH operations on pseries.
 * The pseries platform relies heavily on RTAS, so the platform-dependent
 * EEH operations are implemented on top of RTAS calls. The functions are
 * derived from arch/powerpc/platforms/pseries/eeh.c, with the necessary
 * cleanup applied.
 *
 * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2011.
 * Copyright IBM Corporation 2001, 2005, 2006
 * Copyright Dave Engebretsen & Todd Inglett 2001
 * Copyright Linas Vepstas 2005, 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>

#include <asm/eeh.h>
#include <asm/eeh_event.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include <asm/rtas.h>

/* RTAS tokens */
static int ibm_set_eeh_option;
static int ibm_set_slot_reset;
static int ibm_read_slot_reset_state;
static int ibm_read_slot_reset_state2;
static int ibm_slot_error_detail;
static int ibm_get_config_addr_info;
static int ibm_get_config_addr_info2;
static int ibm_configure_bridge;
static int ibm_configure_pe;

/*
 * Buffer for reporting slot-error-detail RTAS calls. It's kept here
 * in BSS, rather than dynamically allocated, so that it ends up in
 * the RMO where RTAS can access it.
 */
static unsigned char slot_errbuf[RTAS_ERROR_LOG_MAX];
static DEFINE_SPINLOCK(slot_errbuf_lock);
static int eeh_error_buf_size;

/**
 * pseries_eeh_init - EEH platform dependent initialization
 *
 * EEH platform dependent initialization on pseries.
 */
static int pseries_eeh_init(void)
{
        /* Figure out EEH RTAS function call tokens */
        ibm_set_eeh_option = rtas_token("ibm,set-eeh-option");
        ibm_set_slot_reset = rtas_token("ibm,set-slot-reset");
        ibm_read_slot_reset_state2 = rtas_token("ibm,read-slot-reset-state2");
        ibm_read_slot_reset_state = rtas_token("ibm,read-slot-reset-state");
        ibm_slot_error_detail = rtas_token("ibm,slot-error-detail");
        ibm_get_config_addr_info2 = rtas_token("ibm,get-config-addr-info2");
        ibm_get_config_addr_info = rtas_token("ibm,get-config-addr-info");
        ibm_configure_pe = rtas_token("ibm,configure-pe");
        ibm_configure_bridge = rtas_token("ibm,configure-bridge");

        /*
         * Necessary sanity check. We needn't check "ibm,get-config-addr-info"
         * and its variant, since old firmware lacking them should still
         * accept addresses in domain/bus/slot/function form for the EEH
         * RTAS operations.
         */
        if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE) {
                pr_warning("%s: RTAS service <ibm,set-eeh-option> invalid\n",
                        __func__);
                return -EINVAL;
        } else if (ibm_set_slot_reset == RTAS_UNKNOWN_SERVICE) {
                pr_warning("%s: RTAS service <ibm,set-slot-reset> invalid\n",
                        __func__);
                return -EINVAL;
        } else if (ibm_read_slot_reset_state2 == RTAS_UNKNOWN_SERVICE &&
                   ibm_read_slot_reset_state == RTAS_UNKNOWN_SERVICE) {
                pr_warning("%s: RTAS service <ibm,read-slot-reset-state2> and "
                        "<ibm,read-slot-reset-state> invalid\n",
                        __func__);
                return -EINVAL;
        } else if (ibm_slot_error_detail == RTAS_UNKNOWN_SERVICE) {
                pr_warning("%s: RTAS service <ibm,slot-error-detail> invalid\n",
                        __func__);
                return -EINVAL;
        } else if (ibm_configure_pe == RTAS_UNKNOWN_SERVICE &&
                   ibm_configure_bridge == RTAS_UNKNOWN_SERVICE) {
                pr_warning("%s: RTAS service <ibm,configure-pe> and "
                        "<ibm,configure-bridge> invalid\n",
                        __func__);
                return -EINVAL;
        }

        /* Initialize error log lock and size */
        spin_lock_init(&slot_errbuf_lock);
        eeh_error_buf_size = rtas_token("rtas-error-log-max");
        if (eeh_error_buf_size == RTAS_UNKNOWN_SERVICE) {
                pr_warning("%s: unknown EEH error log size\n",
                        __func__);
                eeh_error_buf_size = 1024;
        } else if (eeh_error_buf_size > RTAS_ERROR_LOG_MAX) {
                pr_warning("%s: EEH error log size %d exceeds the maximum %d\n",
                        __func__, eeh_error_buf_size, RTAS_ERROR_LOG_MAX);
                eeh_error_buf_size = RTAS_ERROR_LOG_MAX;
        }

        /* Set EEH probe mode */
        eeh_probe_mode_set(EEH_PROBE_MODE_DEVTREE);

        return 0;
}

/**
 * pseries_eeh_of_probe - EEH probe on the given device
 * @dn: OF node
 * @flag: Unused
 *
 * When the EEH module is installed during system boot, all PCI devices
 * are checked one by one to see whether they support EEH. This function
 * is introduced for that purpose.
 */
static void *pseries_eeh_of_probe(struct device_node *dn, void *flag)
{
        struct eeh_dev *edev;
        struct eeh_pe pe;
        const u32 *class_code, *vendor_id, *device_id;
        const u32 *regs;
        int enable = 0;
        int ret;

        /* Retrieve OF node and eeh device */
        edev = of_node_to_eeh_dev(dn);
        if (!edev || !of_device_is_available(dn))
                return NULL;

        /* Retrieve class/vendor/device IDs */
        class_code = of_get_property(dn, "class-code", NULL);
        vendor_id = of_get_property(dn, "vendor-id", NULL);
        device_id = of_get_property(dn, "device-id", NULL);

        /* Skip for bad OF node or PCI-ISA bridge */
        if (!class_code || !vendor_id || !device_id)
                return NULL;
        if (dn->type && !strcmp(dn->type, "isa"))
                return NULL;

        /* Update class code and mode of eeh device */
        edev->class_code = *class_code;
        edev->mode = 0;

        /* Retrieve the device address */
        regs = of_get_property(dn, "reg", NULL);
        if (!regs) {
                pr_warning("%s: OF node property %s::reg not found\n",
                        __func__, dn->full_name);
                return NULL;
        }

        /* Initialize the fake PE */
        memset(&pe, 0, sizeof(struct eeh_pe));
        pe.phb = edev->phb;
        pe.config_addr = regs[0];

        /* Enable EEH on the device */
        ret = eeh_ops->set_option(&pe, EEH_OPT_ENABLE);
        if (!ret) {
                edev->config_addr = regs[0];
                /* Retrieve PE address */
                edev->pe_config_addr = eeh_ops->get_pe_addr(&pe);
                pe.addr = edev->pe_config_addr;

                /* Some older systems (Power4) allow the ibm,set-eeh-option
                 * call to succeed even on nodes where EEH is not supported.
                 * Verify support explicitly.
                 */
                ret = eeh_ops->get_state(&pe, NULL);
                if (ret > 0 && ret != EEH_STATE_NOT_SUPPORT)
                        enable = 1;

                if (enable) {
                        eeh_subsystem_enabled = 1;
                        eeh_add_to_parent_pe(edev);

                        pr_debug("%s: EEH enabled on %s PHB#%d-PE#%x, config addr#%x\n",
                                __func__, dn->full_name, pe.phb->global_number,
                                pe.addr, pe.config_addr);
                } else if (dn->parent && of_node_to_eeh_dev(dn->parent) &&
                           (of_node_to_eeh_dev(dn->parent))->pe) {
                        /* This device doesn't support EEH, but it may have an
                         * EEH parent, in which case we mark it as supported.
                         */
                        edev->config_addr = of_node_to_eeh_dev(dn->parent)->config_addr;
                        edev->pe_config_addr = of_node_to_eeh_dev(dn->parent)->pe_config_addr;
                        eeh_add_to_parent_pe(edev);
                }
        }

        /* Save memory bars */
        eeh_save_bars(edev);

        return NULL;
}

/**
 * pseries_eeh_set_option - Initialize EEH or MMIO/DMA reenable
 * @pe: EEH PE
 * @option: operation to be issued
 *
 * The function is used to control the EEH functionality globally.
 * Currently, the following options are supported according to PAPR:
 * Enable EEH, Disable EEH, Enable MMIO and Enable DMA.
 */
static int pseries_eeh_set_option(struct eeh_pe *pe, int option)
{
        int ret = 0;
        int config_addr;

        /*
         * When we're enabling or disabling EEH functionality on
         * a particular PE, the PE config address may be unavailable.
         * Therefore, we have to figure it out from the FDT node.
         */
        switch (option) {
        case EEH_OPT_DISABLE:
        case EEH_OPT_ENABLE:
        case EEH_OPT_THAW_MMIO:
        case EEH_OPT_THAW_DMA:
                config_addr = pe->config_addr;
                if (pe->addr)
                        config_addr = pe->addr;
                break;

        default:
                pr_err("%s: Invalid option %d\n",
                        __func__, option);
                return -EINVAL;
        }

        ret = rtas_call(ibm_set_eeh_option, 4, 1, NULL,
                        config_addr, BUID_HI(pe->phb->buid),
                        BUID_LO(pe->phb->buid), option);

        return ret;
}

/**
 * pseries_eeh_get_pe_addr - Retrieve PE address
 * @pe: EEH PE
 *
 * Retrieve the associated PE address. There are actually two RTAS
 * calls dedicated to this purpose: try the new one first, then fall
 * back to the old one. Also, make sure the config address has been
 * figured out from the FDT node before calling this function.
 *
 * Note that a return value of zero means the PE config address is
 * invalid.
 */
static int pseries_eeh_get_pe_addr(struct eeh_pe *pe)
{
        int ret = 0;
        int rets[3];

        if (ibm_get_config_addr_info2 != RTAS_UNKNOWN_SERVICE) {
                /*
                 * First of all, we need to make sure there is one PE
                 * associated with the device. Otherwise, the PE address
                 * is meaningless.
                 */
                ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets,
                                pe->config_addr, BUID_HI(pe->phb->buid),
                                BUID_LO(pe->phb->buid), 1);
                if (ret || (rets[0] == 0))
                        return 0;

                /* Retrieve the associated PE config address */
                ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets,
                                pe->config_addr, BUID_HI(pe->phb->buid),
                                BUID_LO(pe->phb->buid), 0);
                if (ret) {
                        pr_warning("%s: Failed to get address for PHB#%d-PE#%x\n",
                                __func__, pe->phb->global_number, pe->config_addr);
                        return 0;
                }

                return rets[0];
        }

        if (ibm_get_config_addr_info != RTAS_UNKNOWN_SERVICE) {
                ret = rtas_call(ibm_get_config_addr_info, 4, 2, rets,
                                pe->config_addr, BUID_HI(pe->phb->buid),
                                BUID_LO(pe->phb->buid), 0);
                if (ret) {
                        pr_warning("%s: Failed to get address for PHB#%d-PE#%x\n",
                                __func__, pe->phb->global_number, pe->config_addr);
                        return 0;
                }

                return rets[0];
        }

        return ret;
}

/**
 * pseries_eeh_get_state - Retrieve PE state
 * @pe: EEH PE
 * @state: return value
 *
 * Retrieve the state of the specified PE. On an RTAS-compliant
 * pseries platform there is a dedicated RTAS call for this purpose.
 * The associated PE address might already be available when this
 * function is called, so it is used in preference to the config
 * address when possible. Furthermore, there are two RTAS calls for
 * the purpose: try the new one first and fall back to the old one
 * if the new one isn't available.
 */
static int pseries_eeh_get_state(struct eeh_pe *pe, int *state)
{
        int config_addr;
        int ret;
        int rets[4];
        int result;

        /* Figure out PE config address if possible */
        config_addr = pe->config_addr;
        if (pe->addr)
                config_addr = pe->addr;

        if (ibm_read_slot_reset_state2 != RTAS_UNKNOWN_SERVICE) {
                ret = rtas_call(ibm_read_slot_reset_state2, 3, 4, rets,
                                config_addr, BUID_HI(pe->phb->buid),
                                BUID_LO(pe->phb->buid));
        } else if (ibm_read_slot_reset_state != RTAS_UNKNOWN_SERVICE) {
                /* Fake PE unavailable info */
                rets[2] = 0;
                ret = rtas_call(ibm_read_slot_reset_state, 3, 3, rets,
                                config_addr, BUID_HI(pe->phb->buid),
                                BUID_LO(pe->phb->buid));
        } else {
                return EEH_STATE_NOT_SUPPORT;
        }

        if (ret)
                return ret;

        /* Parse the result out */
        result = 0;
        if (rets[1]) {
                switch (rets[0]) {
                case 0:
                        result &= ~EEH_STATE_RESET_ACTIVE;
                        result |= EEH_STATE_MMIO_ACTIVE;
                        result |= EEH_STATE_DMA_ACTIVE;
                        break;
                case 1:
                        result |= EEH_STATE_RESET_ACTIVE;
                        result |= EEH_STATE_MMIO_ACTIVE;
                        result |= EEH_STATE_DMA_ACTIVE;
                        break;
                case 2:
                        result &= ~EEH_STATE_RESET_ACTIVE;
                        result &= ~EEH_STATE_MMIO_ACTIVE;
                        result &= ~EEH_STATE_DMA_ACTIVE;
                        break;
                case 4:
                        result &= ~EEH_STATE_RESET_ACTIVE;
                        result &= ~EEH_STATE_MMIO_ACTIVE;
                        result &= ~EEH_STATE_DMA_ACTIVE;
                        result |= EEH_STATE_MMIO_ENABLED;
                        break;
                case 5:
                        if (rets[2]) {
                                if (state)
                                        *state = rets[2];
                                result = EEH_STATE_UNAVAILABLE;
                        } else {
                                result = EEH_STATE_NOT_SUPPORT;
                        }
                        break;
                default:
                        result = EEH_STATE_NOT_SUPPORT;
                }
        } else {
                result = EEH_STATE_NOT_SUPPORT;
        }

        return result;
}

/**
 * pseries_eeh_reset - Reset the specified PE
 * @pe: EEH PE
 * @option: reset option
 *
 * Reset the specified PE.
 */
static int pseries_eeh_reset(struct eeh_pe *pe, int option)
{
        int config_addr;
        int ret;

        /* Figure out PE address */
        config_addr = pe->config_addr;
        if (pe->addr)
                config_addr = pe->addr;

        /* Reset PE through RTAS call */
        ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
                        config_addr, BUID_HI(pe->phb->buid),
                        BUID_LO(pe->phb->buid), option);

        /* If fundamental-reset not supported, try hot-reset */
        if (option == EEH_RESET_FUNDAMENTAL &&
            ret == -8) {
                ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
                                config_addr, BUID_HI(pe->phb->buid),
                                BUID_LO(pe->phb->buid), EEH_RESET_HOT);
        }

        return ret;
}

/**
 * pseries_eeh_wait_state - Wait for PE state
 * @pe: EEH PE
 * @max_wait: maximal waiting period in milliseconds
 *
 * Wait for the state of the associated PE. It might take some time
 * to retrieve the PE's state.
 */
static int pseries_eeh_wait_state(struct eeh_pe *pe, int max_wait)
{
        int ret;
        int mwait;

        /*
         * According to PAPR, the state of a PE might be temporarily
         * unavailable. In that case, we have to wait for the time
         * indicated by firmware. The maximal wait time of 5 minutes
         * is taken from the original EEH implementation, which also
         * defined the minimal wait time as 1 second.
         */
#define EEH_STATE_MIN_WAIT_TIME (1000)
#define EEH_STATE_MAX_WAIT_TIME (300 * 1000)

        while (1) {
                ret = pseries_eeh_get_state(pe, &mwait);

                /*
                 * If the PE's state is temporarily unavailable,
                 * we have to wait for the specified time. Otherwise,
                 * the PE's state will be returned immediately.
                 */
                if (ret != EEH_STATE_UNAVAILABLE)
                        return ret;

                if (max_wait <= 0) {
                        pr_warning("%s: Timeout when getting PE's state (%d)\n",
                                __func__, max_wait);
                        return EEH_STATE_NOT_SUPPORT;
                }

                if (mwait <= 0) {
                        pr_warning("%s: Firmware returned bad wait value %d\n",
                                __func__, mwait);
                        mwait = EEH_STATE_MIN_WAIT_TIME;
                } else if (mwait > EEH_STATE_MAX_WAIT_TIME) {
                        pr_warning("%s: Firmware returned too long wait value %d\n",
                                __func__, mwait);
                        mwait = EEH_STATE_MAX_WAIT_TIME;
                }

                max_wait -= mwait;
                msleep(mwait);
        }

        return EEH_STATE_NOT_SUPPORT;
}

/**
 * pseries_eeh_get_log - Retrieve error log
 * @pe: EEH PE
 * @severity: temporary or permanent error log
 * @drv_log: driver log to be combined with the retrieved error log
 * @len: length of driver log
 *
 * Retrieve the temporary or permanent error log from the PE.
 * The error is retrieved through the dedicated RTAS call.
 */
static int pseries_eeh_get_log(struct eeh_pe *pe, int severity, char *drv_log, unsigned long len)
{
        int config_addr;
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&slot_errbuf_lock, flags);
        memset(slot_errbuf, 0, eeh_error_buf_size);

        /* Figure out the PE address */
        config_addr = pe->config_addr;
        if (pe->addr)
                config_addr = pe->addr;

        ret = rtas_call(ibm_slot_error_detail, 8, 1, NULL, config_addr,
                        BUID_HI(pe->phb->buid), BUID_LO(pe->phb->buid),
                        virt_to_phys(drv_log), len,
                        virt_to_phys(slot_errbuf), eeh_error_buf_size,
                        severity);
        if (!ret)
                log_error(slot_errbuf, ERR_TYPE_RTAS_LOG, 0);
        spin_unlock_irqrestore(&slot_errbuf_lock, flags);

        return ret;
}

/**
 * pseries_eeh_configure_bridge - Configure PCI bridges in the indicated PE
 * @pe: EEH PE
 *
 * The function is called to reconfigure the bridges included in the
 * specified PE so that the malfunctioning PE can be recovered.
 */
static int pseries_eeh_configure_bridge(struct eeh_pe *pe)
{
        int config_addr;
        int ret;

        /* Figure out the PE address */
        config_addr = pe->config_addr;
        if (pe->addr)
                config_addr = pe->addr;

        /* Use the new configure-pe function, if supported */
        if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) {
                ret = rtas_call(ibm_configure_pe, 3, 1, NULL,
                                config_addr, BUID_HI(pe->phb->buid),
                                BUID_LO(pe->phb->buid));
        } else if (ibm_configure_bridge != RTAS_UNKNOWN_SERVICE) {
                ret = rtas_call(ibm_configure_bridge, 3, 1, NULL,
                                config_addr, BUID_HI(pe->phb->buid),
                                BUID_LO(pe->phb->buid));
        } else {
                return -EFAULT;
        }

        if (ret)
                pr_warning("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n",
                        __func__, pe->phb->global_number, pe->addr, ret);

        return ret;
}

/**
 * pseries_eeh_read_config - Read PCI config space
 * @dn: device node
 * @where: PCI address
 * @size: size to read
 * @val: return value
 *
 * Read config space from the specified device.
 */
static int pseries_eeh_read_config(struct device_node *dn, int where, int size, u32 *val)
{
        struct pci_dn *pdn;

        pdn = PCI_DN(dn);

        return rtas_read_config(pdn, where, size, val);
}

/**
 * pseries_eeh_write_config - Write PCI config space
 * @dn: device node
 * @where: PCI address
 * @size: size to write
 * @val: value to be written
 *
 * Write config space to the specified device.
 */
static int pseries_eeh_write_config(struct device_node *dn, int where, int size, u32 val)
{
        struct pci_dn *pdn;

        pdn = PCI_DN(dn);

        return rtas_write_config(pdn, where, size, val);
}

static struct eeh_ops pseries_eeh_ops = {
        .name = "pseries",
        .init = pseries_eeh_init,
        .of_probe = pseries_eeh_of_probe,
        .dev_probe = NULL,
        .set_option = pseries_eeh_set_option,
        .get_pe_addr = pseries_eeh_get_pe_addr,
        .get_state = pseries_eeh_get_state,
        .reset = pseries_eeh_reset,
        .wait_state = pseries_eeh_wait_state,
        .get_log = pseries_eeh_get_log,
        .configure_bridge = pseries_eeh_configure_bridge,
        .read_config = pseries_eeh_read_config,
        .write_config = pseries_eeh_write_config
};

/**
 * eeh_pseries_init - Register platform dependent EEH operations
 *
 * EEH initialization on the pseries platform. This function should be
 * called before any other EEH-related functions.
 */
static int __init eeh_pseries_init(void)
{
        int ret = -EINVAL;

        if (!machine_is(pseries))
                return ret;

        ret = eeh_ops_register(&pseries_eeh_ops);
        if (!ret)
                pr_info("EEH: pSeries platform initialized\n");
        else
                pr_info("EEH: pSeries platform initialization failure (%d)\n",
                        ret);

        return ret;
}

early_initcall(eeh_pseries_init);