// SPDX-License-Identifier: GPL-2.0
/*
 * Implement the AER root port service driver. The driver registers an IRQ
 * handler. When a root port triggers an AER interrupt, the IRQ handler
 * collects root port status and schedules work.
 *
 * Copyright (C) 2006 Intel Corp.
 *	Tom Long Nguyen (tom.l.nguyen@intel.com)
 *	Zhang Yanmin (yanmin.zhang@intel.com)
 *
 * (C) Copyright 2009 Hewlett-Packard Development Company, L.P.
 *	Andrew Patterson <andrew.patterson@hp.com>
 */

#include <linux/cper.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pm.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/kfifo.h>
#include <linux/slab.h>
#include <acpi/apei.h>
#include <ras/ras_event.h>

#include "../pci.h"
#include "portdrv.h"

#define AER_ERROR_SOURCES_MAX		128

#define AER_MAX_TYPEOF_COR_ERRS		16	/* as per PCI_ERR_COR_STATUS */
#define AER_MAX_TYPEOF_UNCOR_ERRS	26	/* as per PCI_ERR_UNCOR_STATUS */

struct aer_err_source {
	unsigned int status;
	unsigned int id;
};

struct aer_rpc {
	struct pci_dev *rpd;		/* Root Port device */
	DECLARE_KFIFO(aer_fifo, struct aer_err_source, AER_ERROR_SOURCES_MAX);
};
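
/*
 * An aer_err_source entry is a snapshot of the Root Port's Root Error
 * Status and Error Source Identification registers: aer_irq() captures
 * them and pushes the entry onto aer_rpc.aer_fifo, and the threaded
 * handler aer_isr() later drains the FIFO and processes each entry.
 */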

/* AER stats for the device */
struct aer_stats {

	/*
	 * Fields for all AER capable devices. They indicate the errors
	 * "as seen by this device". Note that this may mean that if an
	 * end point is causing problems, the AER counters may increment
	 * at its link partner (e.g. root port) because the errors will be
	 * "seen" by the link partner and not the problematic end point
	 * itself (which may report all counters as 0 as it never saw any
	 * problems).
	 */
	/* Counters for different type of correctable errors */
	u64 dev_cor_errs[AER_MAX_TYPEOF_COR_ERRS];
	/* Counters for different type of fatal uncorrectable errors */
	u64 dev_fatal_errs[AER_MAX_TYPEOF_UNCOR_ERRS];
	/* Counters for different type of nonfatal uncorrectable errors */
	u64 dev_nonfatal_errs[AER_MAX_TYPEOF_UNCOR_ERRS];
	/* Total number of ERR_COR sent by this device */
	u64 dev_total_cor_errs;
	/* Total number of ERR_FATAL sent by this device */
	u64 dev_total_fatal_errs;
	/* Total number of ERR_NONFATAL sent by this device */
	u64 dev_total_nonfatal_errs;

	/*
	 * Fields for Root ports & root complex event collectors only, these
	 * indicate the total number of ERR_COR, ERR_FATAL, and ERR_NONFATAL
	 * messages received by the root port / event collector, INCLUDING the
	 * ones that are generated internally (by the root port itself)
	 */
	u64 rootport_total_cor_errs;
	u64 rootport_total_fatal_errs;
	u64 rootport_total_nonfatal_errs;
};

#define AER_LOG_TLP_MASKS		(PCI_ERR_UNC_POISON_TLP|	\
					PCI_ERR_UNC_ECRC|		\
					PCI_ERR_UNC_UNSUP|		\
					PCI_ERR_UNC_COMP_ABORT|		\
					PCI_ERR_UNC_UNX_COMP|		\
					PCI_ERR_UNC_MALF_TLP)

#define SYSTEM_ERROR_INTR_ON_MESG_MASK	(PCI_EXP_RTCTL_SECEE|	\
					PCI_EXP_RTCTL_SENFEE|	\
					PCI_EXP_RTCTL_SEFEE)
#define ROOT_PORT_INTR_ON_MESG_MASK	(PCI_ERR_ROOT_CMD_COR_EN|	\
					PCI_ERR_ROOT_CMD_NONFATAL_EN|	\
					PCI_ERR_ROOT_CMD_FATAL_EN)
/*
 * PCI_ERR_ROOT_ERR_SRC: ERR_COR source ID in bits [15:0],
 * ERR_FATAL/NONFATAL source ID in bits [31:16].
 */
#define ERR_COR_ID(d)			(d & 0xffff)
#define ERR_UNCOR_ID(d)			(d >> 16)

static int pcie_aer_disable;

void pci_no_aer(void)
{
	pcie_aer_disable = 1;
}

bool pci_aer_available(void)
{
	return !pcie_aer_disable && pci_msi_enabled();
}

#ifdef CONFIG_PCIE_ECRC

#define ECRC_POLICY_DEFAULT	0	/* ECRC set by BIOS */
#define ECRC_POLICY_OFF		1	/* ECRC off for performance */
#define ECRC_POLICY_ON		2	/* ECRC on for data integrity */

static int ecrc_policy = ECRC_POLICY_DEFAULT;

static const char * const ecrc_policy_str[] = {
	[ECRC_POLICY_DEFAULT] = "bios",
	[ECRC_POLICY_OFF] = "off",
	[ECRC_POLICY_ON] = "on"
};

/**
 * enable_ecrc_checking - enable PCIe ECRC checking for a device
 * @dev: the PCI device
 *
 * Returns 0 on success, or negative on failure.
 */
static int enable_ecrc_checking(struct pci_dev *dev)
{
	int pos;
	u32 reg32;

	if (!pci_is_pcie(dev))
		return -ENODEV;

	pos = dev->aer_cap;
	if (!pos)
		return -ENODEV;

	pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
	if (reg32 & PCI_ERR_CAP_ECRC_GENC)
		reg32 |= PCI_ERR_CAP_ECRC_GENE;
	if (reg32 & PCI_ERR_CAP_ECRC_CHKC)
		reg32 |= PCI_ERR_CAP_ECRC_CHKE;
	pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);

	return 0;
}

/**
 * disable_ecrc_checking - disables PCIe ECRC checking for a device
 * @dev: the PCI device
 *
 * Returns 0 on success, or negative on failure.
 */
static int disable_ecrc_checking(struct pci_dev *dev)
{
	int pos;
	u32 reg32;

	if (!pci_is_pcie(dev))
		return -ENODEV;

	pos = dev->aer_cap;
	if (!pos)
		return -ENODEV;

	pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
	reg32 &= ~(PCI_ERR_CAP_ECRC_GENE | PCI_ERR_CAP_ECRC_CHKE);
	pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);

	return 0;
}

/**
 * pcie_set_ecrc_checking - set/unset PCIe ECRC checking for a device based on global policy
 * @dev: the PCI device
 */
void pcie_set_ecrc_checking(struct pci_dev *dev)
{
	switch (ecrc_policy) {
	case ECRC_POLICY_DEFAULT:
		return;
	case ECRC_POLICY_OFF:
		disable_ecrc_checking(dev);
		break;
	case ECRC_POLICY_ON:
		enable_ecrc_checking(dev);
		break;
	default:
		return;
	}
}

/**
 * pcie_ecrc_get_policy - parse kernel command-line ecrc option
 * @str: ECRC policy string from the kernel command line
 */
void pcie_ecrc_get_policy(char *str)
{
	int i;

	i = match_string(ecrc_policy_str, ARRAY_SIZE(ecrc_policy_str), str);
	if (i < 0)
		return;

	ecrc_policy = i;
}
#endif	/* CONFIG_PCIE_ECRC */

#ifdef CONFIG_ACPI_APEI
static inline int hest_match_pci(struct acpi_hest_aer_common *p,
				 struct pci_dev *pci)
{
	return ACPI_HEST_SEGMENT(p->bus) == pci_domain_nr(pci->bus) &&
	       ACPI_HEST_BUS(p->bus) == pci->bus->number &&
	       p->device == PCI_SLOT(pci->devfn) &&
	       p->function == PCI_FUNC(pci->devfn);
}

static inline bool hest_match_type(struct acpi_hest_header *hest_hdr,
				   struct pci_dev *dev)
{
	u16 hest_type = hest_hdr->type;
	u8 pcie_type = pci_pcie_type(dev);

	if ((hest_type == ACPI_HEST_TYPE_AER_ROOT_PORT &&
	     pcie_type == PCI_EXP_TYPE_ROOT_PORT) ||
	    (hest_type == ACPI_HEST_TYPE_AER_ENDPOINT &&
	     pcie_type == PCI_EXP_TYPE_ENDPOINT) ||
	    (hest_type == ACPI_HEST_TYPE_AER_BRIDGE &&
	     (dev->class >> 16) == PCI_BASE_CLASS_BRIDGE))
		return true;
	return false;
}

struct aer_hest_parse_info {
	struct pci_dev *pci_dev;
	int firmware_first;
};

static int hest_source_is_pcie_aer(struct acpi_hest_header *hest_hdr)
{
	if (hest_hdr->type == ACPI_HEST_TYPE_AER_ROOT_PORT ||
	    hest_hdr->type == ACPI_HEST_TYPE_AER_ENDPOINT ||
	    hest_hdr->type == ACPI_HEST_TYPE_AER_BRIDGE)
		return 1;
	return 0;
}
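
/*
 * aer_hest_parse() below implements the firmware-first lookup: an entry
 * flagged ACPI_HEST_GLOBAL applies to every device whose PCIe type matches
 * the HEST source type (hest_match_type()), otherwise the entry must match
 * the device's segment/bus/device/function exactly (hest_match_pci()).
 * Note that pcie_ports_native (set by the "pcie_ports=native" command-line
 * option) overrides the HEST result in the callers further below.
 */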

static int aer_hest_parse(struct acpi_hest_header *hest_hdr, void *data)
{
	struct aer_hest_parse_info *info = data;
	struct acpi_hest_aer_common *p;
	int ff;

	if (!hest_source_is_pcie_aer(hest_hdr))
		return 0;

	p = (struct acpi_hest_aer_common *)(hest_hdr + 1);
	ff = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);

	/*
	 * If no specific device is supplied, determine whether
	 * FIRMWARE_FIRST is set for *any* PCIe device.
	 */
	if (!info->pci_dev) {
		info->firmware_first |= ff;
		return 0;
	}

	/* Otherwise, check the specific device */
	if (p->flags & ACPI_HEST_GLOBAL) {
		if (hest_match_type(hest_hdr, info->pci_dev))
			info->firmware_first = ff;
	} else
		if (hest_match_pci(p, info->pci_dev))
			info->firmware_first = ff;

	return 0;
}

static void aer_set_firmware_first(struct pci_dev *pci_dev)
{
	int rc;
	struct aer_hest_parse_info info = {
		.pci_dev	= pci_dev,
		.firmware_first	= 0,
	};

	rc = apei_hest_parse(aer_hest_parse, &info);

	if (rc)
		pci_dev->__aer_firmware_first = 0;
	else
		pci_dev->__aer_firmware_first = info.firmware_first;
	pci_dev->__aer_firmware_first_valid = 1;
}

int pcie_aer_get_firmware_first(struct pci_dev *dev)
{
	if (!pci_is_pcie(dev))
		return 0;

	if (pcie_ports_native)
		return 0;

	if (!dev->__aer_firmware_first_valid)
		aer_set_firmware_first(dev);
	return dev->__aer_firmware_first;
}

static bool aer_firmware_first;

/**
 * aer_acpi_firmware_first - Check if APEI should control AER.
 */
bool aer_acpi_firmware_first(void)
{
	static bool parsed = false;
	struct aer_hest_parse_info info = {
		.pci_dev	= NULL,	/* Check all PCIe devices */
		.firmware_first	= 0,
	};

	if (pcie_ports_native)
		return false;

	if (!parsed) {
		apei_hest_parse(aer_hest_parse, &info);
		aer_firmware_first = info.firmware_first;
		parsed = true;
	}
	return aer_firmware_first;
}
#endif

#define PCI_EXP_AER_FLAGS	(PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE | \
				 PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE)

int pci_enable_pcie_error_reporting(struct pci_dev *dev)
{
	if (pcie_aer_get_firmware_first(dev))
		return -EIO;

	if (!dev->aer_cap)
		return -EIO;

	return pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_AER_FLAGS);
}
EXPORT_SYMBOL_GPL(pci_enable_pcie_error_reporting);

int pci_disable_pcie_error_reporting(struct pci_dev *dev)
{
	if (pcie_aer_get_firmware_first(dev))
		return -EIO;

	return pcie_capability_clear_word(dev, PCI_EXP_DEVCTL,
					  PCI_EXP_AER_FLAGS);
}
EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting);

void pci_aer_clear_device_status(struct pci_dev *dev)
{
	u16 sta;

	pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &sta);
	pcie_capability_write_word(dev, PCI_EXP_DEVSTA, sta);
}

int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
{
	int pos;
	u32 status, sev;

	pos = dev->aer_cap;
	if (!pos)
		return -EIO;

	if (pcie_aer_get_firmware_first(dev))
		return -EIO;

	/* Clear status bits for ERR_NONFATAL errors only */
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &sev);
	status &= ~sev;
	if (status)
		pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);

	return 0;
}
EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status);

void pci_aer_clear_fatal_status(struct pci_dev *dev)
{
	int pos;
	u32 status, sev;

	pos = dev->aer_cap;
	if (!pos)
		return;

	if (pcie_aer_get_firmware_first(dev))
		return;

	/* Clear status bits for ERR_FATAL errors only */
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &sev);
	status &= sev;
	if (status)
		pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
}
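
/*
 * Note the symmetry between the two helpers above:
 * pci_cleanup_aer_uncorrect_error_status() clears only the uncorrectable
 * status bits whose severity is configured as non-fatal (status &= ~sev),
 * while pci_aer_clear_fatal_status() clears only those configured as
 * fatal (status &= sev).
 */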

int pci_cleanup_aer_error_status_regs(struct pci_dev *dev)
{
	int pos;
	u32 status;
	int port_type;

	if (!pci_is_pcie(dev))
		return -ENODEV;

	pos = dev->aer_cap;
	if (!pos)
		return -EIO;

	if (pcie_aer_get_firmware_first(dev))
		return -EIO;

	port_type = pci_pcie_type(dev);
	if (port_type == PCI_EXP_TYPE_ROOT_PORT) {
		pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &status);
		pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, status);
	}

	pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &status);
	pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, status);

	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);

	return 0;
}

void pci_aer_init(struct pci_dev *dev)
{
	dev->aer_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);

	if (dev->aer_cap)
		dev->aer_stats = kzalloc(sizeof(struct aer_stats), GFP_KERNEL);

	pci_cleanup_aer_error_status_regs(dev);
}

void pci_aer_exit(struct pci_dev *dev)
{
	kfree(dev->aer_stats);
	dev->aer_stats = NULL;
}

#define AER_AGENT_RECEIVER		0
#define AER_AGENT_REQUESTER		1
#define AER_AGENT_COMPLETER		2
#define AER_AGENT_TRANSMITTER		3

#define AER_AGENT_REQUESTER_MASK(t)	((t == AER_CORRECTABLE) ?	\
	0 : (PCI_ERR_UNC_COMP_TIME|PCI_ERR_UNC_UNSUP))
#define AER_AGENT_COMPLETER_MASK(t)	((t == AER_CORRECTABLE) ?	\
	0 : PCI_ERR_UNC_COMP_ABORT)
#define AER_AGENT_TRANSMITTER_MASK(t)	((t == AER_CORRECTABLE) ?	\
	(PCI_ERR_COR_REP_ROLL|PCI_ERR_COR_REP_TIMER) : 0)

#define AER_GET_AGENT(t, e)						\
	((e & AER_AGENT_COMPLETER_MASK(t)) ? AER_AGENT_COMPLETER :	\
	(e & AER_AGENT_REQUESTER_MASK(t)) ? AER_AGENT_REQUESTER :	\
	(e & AER_AGENT_TRANSMITTER_MASK(t)) ? AER_AGENT_TRANSMITTER :	\
	AER_AGENT_RECEIVER)

#define AER_PHYSICAL_LAYER_ERROR	0
#define AER_DATA_LINK_LAYER_ERROR	1
#define AER_TRANSACTION_LAYER_ERROR	2

#define AER_PHYSICAL_LAYER_ERROR_MASK(t) ((t == AER_CORRECTABLE) ?	\
	PCI_ERR_COR_RCVR : 0)
#define AER_DATA_LINK_LAYER_ERROR_MASK(t) ((t == AER_CORRECTABLE) ?	\
	(PCI_ERR_COR_BAD_TLP|						\
	PCI_ERR_COR_BAD_DLLP|						\
	PCI_ERR_COR_REP_ROLL|						\
	PCI_ERR_COR_REP_TIMER) : PCI_ERR_UNC_DLP)

#define AER_GET_LAYER_ERROR(t, e)					\
	((e & AER_PHYSICAL_LAYER_ERROR_MASK(t)) ? AER_PHYSICAL_LAYER_ERROR : \
	(e & AER_DATA_LINK_LAYER_ERROR_MASK(t)) ? AER_DATA_LINK_LAYER_ERROR : \
	AER_TRANSACTION_LAYER_ERROR)
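
/*
 * Example: for a correctable Bad TLP error (PCI_ERR_COR_BAD_TLP),
 * AER_GET_LAYER_ERROR(AER_CORRECTABLE, status) evaluates to
 * AER_DATA_LINK_LAYER_ERROR and AER_GET_AGENT(AER_CORRECTABLE, status)
 * falls through to AER_AGENT_RECEIVER, so the error is logged as a
 * "Data Link Layer" error reported by the "Receiver ID".
 */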

/*
 * AER error strings
 */
static const char *aer_error_severity_string[] = {
	"Uncorrected (Non-Fatal)",
	"Uncorrected (Fatal)",
	"Corrected"
};

static const char *aer_error_layer[] = {
	"Physical Layer",
	"Data Link Layer",
	"Transaction Layer"
};

static const char *aer_correctable_error_string[AER_MAX_TYPEOF_COR_ERRS] = {
	"RxErr",		/* Bit Position 0 */
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	"BadTLP",		/* Bit Position 6 */
	"BadDLLP",		/* Bit Position 7 */
	"Rollover",		/* Bit Position 8 */
	NULL,
	NULL,
	NULL,
	"Timeout",		/* Bit Position 12 */
	"NonFatalErr",		/* Bit Position 13 */
	"CorrIntErr",		/* Bit Position 14 */
	"HeaderOF",		/* Bit Position 15 */
};

static const char *aer_uncorrectable_error_string[AER_MAX_TYPEOF_UNCOR_ERRS] = {
	"Undefined",		/* Bit Position 0 */
	NULL,
	NULL,
	NULL,
	"DLP",			/* Bit Position 4 */
	"SDES",			/* Bit Position 5 */
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	"TLP",			/* Bit Position 12 */
	"FCP",			/* Bit Position 13 */
	"CmpltTO",		/* Bit Position 14 */
	"CmpltAbrt",		/* Bit Position 15 */
	"UnxCmplt",		/* Bit Position 16 */
	"RxOF",			/* Bit Position 17 */
	"MalfTLP",		/* Bit Position 18 */
	"ECRC",			/* Bit Position 19 */
	"UnsupReq",		/* Bit Position 20 */
	"ACSViol",		/* Bit Position 21 */
	"UncorrIntErr",		/* Bit Position 22 */
	"BlockedTLP",		/* Bit Position 23 */
	"AtomicOpBlocked",	/* Bit Position 24 */
	"TLPBlockedErr",	/* Bit Position 25 */
};

static const char *aer_agent_string[] = {
	"Receiver ID",
	"Requester ID",
	"Completer ID",
	"Transmitter ID"
};

#define aer_stats_dev_attr(name, stats_array, strings_array,		\
			   total_string, total_field)			\
	static ssize_t							\
	name##_show(struct device *dev, struct device_attribute *attr,	\
		    char *buf)						\
{									\
	unsigned int i;							\
	char *str = buf;						\
	struct pci_dev *pdev = to_pci_dev(dev);				\
	u64 *stats = pdev->aer_stats->stats_array;			\
									\
	for (i = 0; i < ARRAY_SIZE(strings_array); i++) {		\
		if (strings_array[i])					\
			str += sprintf(str, "%s %llu\n",		\
				       strings_array[i], stats[i]);	\
		else if (stats[i])					\
			str += sprintf(str, #stats_array "_bit[%d] %llu\n",\
				       i, stats[i]);			\
	}								\
	str += sprintf(str, "TOTAL_%s %llu\n", total_string,		\
		       pdev->aer_stats->total_field);			\
	return str-buf;							\
}									\
static DEVICE_ATTR_RO(name)

aer_stats_dev_attr(aer_dev_correctable, dev_cor_errs,
		   aer_correctable_error_string, "ERR_COR",
		   dev_total_cor_errs);
aer_stats_dev_attr(aer_dev_fatal, dev_fatal_errs,
		   aer_uncorrectable_error_string, "ERR_FATAL",
		   dev_total_fatal_errs);
aer_stats_dev_attr(aer_dev_nonfatal, dev_nonfatal_errs,
		   aer_uncorrectable_error_string, "ERR_NONFATAL",
		   dev_total_nonfatal_errs);

#define aer_stats_rootport_attr(name, field)				\
	static ssize_t							\
	name##_show(struct device *dev, struct device_attribute *attr,	\
		    char *buf)						\
{									\
	struct pci_dev *pdev = to_pci_dev(dev);				\
	return sprintf(buf, "%llu\n", pdev->aer_stats->field);		\
}									\
static DEVICE_ATTR_RO(name)

aer_stats_rootport_attr(aer_rootport_total_err_cor,
			rootport_total_cor_errs);
aer_stats_rootport_attr(aer_rootport_total_err_fatal,
			rootport_total_fatal_errs);
aer_stats_rootport_attr(aer_rootport_total_err_nonfatal,
			rootport_total_nonfatal_errs);
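
/*
 * The attributes above appear under the device's sysfs directory, e.g.
 * /sys/bus/pci/devices/<BDF>/aer_dev_correctable.  Illustrative output
 * (counter values are examples only):
 *
 *	RxErr 0
 *	BadTLP 3
 *	BadDLLP 0
 *	...
 *	TOTAL_ERR_COR 3
 *
 * Bits with no name string but a non-zero count are printed as
 * "dev_cor_errs_bit[<n>] <count>".
 */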

static struct attribute *aer_stats_attrs[] __ro_after_init = {
	&dev_attr_aer_dev_correctable.attr,
	&dev_attr_aer_dev_fatal.attr,
	&dev_attr_aer_dev_nonfatal.attr,
	&dev_attr_aer_rootport_total_err_cor.attr,
	&dev_attr_aer_rootport_total_err_fatal.attr,
	&dev_attr_aer_rootport_total_err_nonfatal.attr,
	NULL
};

static umode_t aer_stats_attrs_are_visible(struct kobject *kobj,
					   struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pci_dev *pdev = to_pci_dev(dev);

	if (!pdev->aer_stats)
		return 0;

	if ((a == &dev_attr_aer_rootport_total_err_cor.attr ||
	     a == &dev_attr_aer_rootport_total_err_fatal.attr ||
	     a == &dev_attr_aer_rootport_total_err_nonfatal.attr) &&
	    pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT)
		return 0;

	return a->mode;
}

const struct attribute_group aer_stats_attr_group = {
	.attrs = aer_stats_attrs,
	.is_visible = aer_stats_attrs_are_visible,
};

static void pci_dev_aer_stats_incr(struct pci_dev *pdev,
				   struct aer_err_info *info)
{
	int status, i, max = -1;
	u64 *counter = NULL;
	struct aer_stats *aer_stats = pdev->aer_stats;

	if (!aer_stats)
		return;

	switch (info->severity) {
	case AER_CORRECTABLE:
		aer_stats->dev_total_cor_errs++;
		counter = &aer_stats->dev_cor_errs[0];
		max = AER_MAX_TYPEOF_COR_ERRS;
		break;
	case AER_NONFATAL:
		aer_stats->dev_total_nonfatal_errs++;
		counter = &aer_stats->dev_nonfatal_errs[0];
		max = AER_MAX_TYPEOF_UNCOR_ERRS;
		break;
	case AER_FATAL:
		aer_stats->dev_total_fatal_errs++;
		counter = &aer_stats->dev_fatal_errs[0];
		max = AER_MAX_TYPEOF_UNCOR_ERRS;
		break;
	}

	status = (info->status & ~info->mask);
	for (i = 0; i < max; i++)
		if (status & (1 << i))
			counter[i]++;
}

static void pci_rootport_aer_stats_incr(struct pci_dev *pdev,
					struct aer_err_source *e_src)
{
	struct aer_stats *aer_stats = pdev->aer_stats;

	if (!aer_stats)
		return;

	if (e_src->status & PCI_ERR_ROOT_COR_RCV)
		aer_stats->rootport_total_cor_errs++;

	if (e_src->status & PCI_ERR_ROOT_UNCOR_RCV) {
		if (e_src->status & PCI_ERR_ROOT_FATAL_RCV)
			aer_stats->rootport_total_fatal_errs++;
		else
			aer_stats->rootport_total_nonfatal_errs++;
	}
}

static void __print_tlp_header(struct pci_dev *dev,
			       struct aer_header_log_regs *t)
{
	pci_err(dev, "  TLP Header: %08x %08x %08x %08x\n",
		t->dw0, t->dw1, t->dw2, t->dw3);
}

static void __aer_print_error(struct pci_dev *dev,
			      struct aer_err_info *info)
{
	int i, status;
	const char *errmsg = NULL;
	status = (info->status & ~info->mask);

	for (i = 0; i < 32; i++) {
		if (!(status & (1 << i)))
			continue;

		if (info->severity == AER_CORRECTABLE)
			errmsg = i < ARRAY_SIZE(aer_correctable_error_string) ?
				aer_correctable_error_string[i] : NULL;
		else
			errmsg = i < ARRAY_SIZE(aer_uncorrectable_error_string) ?
				aer_uncorrectable_error_string[i] : NULL;

		if (errmsg)
			pci_err(dev, "   [%2d] %-22s%s\n", i, errmsg,
				info->first_error == i ? " (First)" : "");
		else
			pci_err(dev, "   [%2d] Unknown Error Bit%s\n",
				i, info->first_error == i ? " (First)" : "");
	}
	pci_dev_aer_stats_incr(dev, info);
}

void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
{
	int layer, agent;
	int id = ((dev->bus->number << 8) | dev->devfn);

	if (!info->status) {
		pci_err(dev, "PCIe Bus Error: severity=%s, type=Inaccessible, (Unregistered Agent ID)\n",
			aer_error_severity_string[info->severity]);
		goto out;
	}

	layer = AER_GET_LAYER_ERROR(info->severity, info->status);
	agent = AER_GET_AGENT(info->severity, info->status);

	pci_err(dev, "PCIe Bus Error: severity=%s, type=%s, (%s)\n",
		aer_error_severity_string[info->severity],
		aer_error_layer[layer], aer_agent_string[agent]);

	pci_err(dev, "  device [%04x:%04x] error status/mask=%08x/%08x\n",
		dev->vendor, dev->device,
		info->status, info->mask);

	__aer_print_error(dev, info);

	if (info->tlp_header_valid)
		__print_tlp_header(dev, &info->tlp);

out:
	if (info->id && info->error_dev_num > 1 && info->id == id)
		pci_err(dev, "  Error of this Agent is reported first\n");

	trace_aer_event(dev_name(&dev->dev), (info->status & ~info->mask),
			info->severity, info->tlp_header_valid, &info->tlp);
}

static void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info)
{
	u8 bus = info->id >> 8;
	u8 devfn = info->id & 0xff;

	pci_info(dev, "AER: %s%s error received: %04x:%02x:%02x.%d\n",
		 info->multi_error_valid ? "Multiple " : "",
		 aer_error_severity_string[info->severity],
		 pci_domain_nr(dev->bus), bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
}

#ifdef CONFIG_ACPI_APEI_PCIEAER
int cper_severity_to_aer(int cper_severity)
{
	switch (cper_severity) {
	case CPER_SEV_RECOVERABLE:
		return AER_NONFATAL;
	case CPER_SEV_FATAL:
		return AER_FATAL;
	default:
		return AER_CORRECTABLE;
	}
}
EXPORT_SYMBOL_GPL(cper_severity_to_aer);

void cper_print_aer(struct pci_dev *dev, int aer_severity,
		    struct aer_capability_regs *aer)
{
	int layer, agent, tlp_header_valid = 0;
	u32 status, mask;
	struct aer_err_info info;

	if (aer_severity == AER_CORRECTABLE) {
		status = aer->cor_status;
		mask = aer->cor_mask;
	} else {
		status = aer->uncor_status;
		mask = aer->uncor_mask;
		tlp_header_valid = status & AER_LOG_TLP_MASKS;
	}

	layer = AER_GET_LAYER_ERROR(aer_severity, status);
	agent = AER_GET_AGENT(aer_severity, status);

	memset(&info, 0, sizeof(info));
	info.severity = aer_severity;
	info.status = status;
	info.mask = mask;
	info.first_error = PCI_ERR_CAP_FEP(aer->cap_control);

	pci_err(dev, "aer_status: 0x%08x, aer_mask: 0x%08x\n", status, mask);
	__aer_print_error(dev, &info);
	pci_err(dev, "aer_layer=%s, aer_agent=%s\n",
		aer_error_layer[layer], aer_agent_string[agent]);

	if (aer_severity != AER_CORRECTABLE)
		pci_err(dev, "aer_uncor_severity: 0x%08x\n",
			aer->uncor_severity);

	if (tlp_header_valid)
		__print_tlp_header(dev, &aer->header_log);

	trace_aer_event(dev_name(&dev->dev), (status & ~mask),
			aer_severity, tlp_header_valid, &aer->header_log);
}
#endif

/**
 * add_error_device - list device to be handled
 * @e_info: pointer to error info
 * @dev: pointer to pci_dev to be added
 */
static int add_error_device(struct aer_err_info *e_info, struct pci_dev *dev)
{
	if (e_info->error_dev_num < AER_MAX_MULTI_ERR_DEVICES) {
		e_info->dev[e_info->error_dev_num] = pci_dev_get(dev);
		e_info->error_dev_num++;
		return 0;
	}
	return -ENOSPC;
}

/**
 * is_error_source - check whether the device is source of reported error
 * @dev: pointer to pci_dev to be checked
 * @e_info: pointer to reported error info
 */
static bool is_error_source(struct pci_dev *dev, struct aer_err_info *e_info)
{
	int pos;
	u32 status, mask;
	u16 reg16;

	/*
	 * When bus id is equal to 0, it might be a bad id
	 * reported by root port.
	 */
	if ((PCI_BUS_NUM(e_info->id) != 0) &&
	    !(dev->bus->bus_flags & PCI_BUS_FLAGS_NO_AERSID)) {
		/* Device ID match? */
		if (e_info->id == ((dev->bus->number << 8) | dev->devfn))
			return true;

		/* If there is no multi-error indication, the ID comparison above is final */
		if (!e_info->multi_error_valid)
			return false;
	}

	/*
	 * When either
	 * 1) bus id is equal to 0 (some ports might lose the bus
	 *    id of the error source id);
	 * 2) bus flag PCI_BUS_FLAGS_NO_AERSID is set;
	 * 3) there are multiple errors and the prior ID comparison fails;
	 * we check the AER status registers to find a possible reporter.
	 */
	if (atomic_read(&dev->enable_cnt) == 0)
		return false;

	/* Check if AER is enabled */
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &reg16);
	if (!(reg16 & PCI_EXP_AER_FLAGS))
		return false;

	pos = dev->aer_cap;
	if (!pos)
		return false;

	/* Check if error is recorded */
	if (e_info->severity == AER_CORRECTABLE) {
		pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &status);
		pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &mask);
	} else {
		pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
		pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &mask);
	}
	if (status & ~mask)
		return true;

	return false;
}

static int find_device_iter(struct pci_dev *dev, void *data)
{
	struct aer_err_info *e_info = (struct aer_err_info *)data;

	if (is_error_source(dev, e_info)) {
		/* List this device */
		if (add_error_device(e_info, dev)) {
			/* We cannot handle more... Stop iteration */
			/* TODO: Should print error message here? */
			return 1;
		}

		/* If there is only a single error, stop iteration */
		if (!e_info->multi_error_valid)
			return 1;
	}
	return 0;
}

/**
 * find_source_device - search through device hierarchy for source device
 * @parent: pointer to Root Port pci_dev data structure
 * @e_info: including detailed error information such as id
 *
 * Return true if found.
 *
 * Invoked by the threaded IRQ handler when an error is detected at the
 * Root Port.  The caller must set id, severity, and multi_error_valid of
 * the struct aer_err_info pointed to by @e_info properly.  This function
 * fills e_info->error_dev_num and e_info->dev[] based on that information.
 */
static bool find_source_device(struct pci_dev *parent,
			       struct aer_err_info *e_info)
{
	struct pci_dev *dev = parent;
	int result;

	/* Must reset in this function */
	e_info->error_dev_num = 0;

	/* Is Root Port an agent that sends an error message? */
	result = find_device_iter(dev, e_info);
	if (result)
		return true;

	pci_walk_bus(parent->subordinate, find_device_iter, e_info);

	if (!e_info->error_dev_num) {
		pci_printk(KERN_DEBUG, parent, "can't find device of ID%04x\n",
			   e_info->id);
		return false;
	}
	return true;
}

/**
 * handle_error_source - handle logging error into an event log
 * @dev: pointer to pci_dev data structure of error source device
 * @info: comprehensive error information
 *
 * Invoked when an error is detected by the Root Port.
 */
static void handle_error_source(struct pci_dev *dev, struct aer_err_info *info)
{
	int pos;

	if (info->severity == AER_CORRECTABLE) {
		/*
		 * Correctable error does not need software intervention.
		 * No need to go through error recovery process.
		 */
		pos = dev->aer_cap;
		if (pos)
			pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS,
					       info->status);
		pci_aer_clear_device_status(dev);
	} else if (info->severity == AER_NONFATAL)
		pcie_do_recovery(dev, pci_channel_io_normal,
				 PCIE_PORT_SERVICE_AER);
	else if (info->severity == AER_FATAL)
		pcie_do_recovery(dev, pci_channel_io_frozen,
				 PCIE_PORT_SERVICE_AER);
	pci_dev_put(dev);
}

#ifdef CONFIG_ACPI_APEI_PCIEAER

#define AER_RECOVER_RING_ORDER		4
#define AER_RECOVER_RING_SIZE		(1 << AER_RECOVER_RING_ORDER)

struct aer_recover_entry {
	u8	bus;
	u8	devfn;
	u16	domain;
	int	severity;
	struct aer_capability_regs *regs;
};

static DEFINE_KFIFO(aer_recover_ring, struct aer_recover_entry,
		    AER_RECOVER_RING_SIZE);

static void aer_recover_work_func(struct work_struct *work)
{
	struct aer_recover_entry entry;
	struct pci_dev *pdev;

	while (kfifo_get(&aer_recover_ring, &entry)) {
		pdev = pci_get_domain_bus_and_slot(entry.domain, entry.bus,
						   entry.devfn);
		if (!pdev) {
			pr_err("AER recover: Can not find pci_dev for %04x:%02x:%02x:%x\n",
			       entry.domain, entry.bus,
			       PCI_SLOT(entry.devfn), PCI_FUNC(entry.devfn));
			continue;
		}
		cper_print_aer(pdev, entry.severity, entry.regs);
		if (entry.severity == AER_NONFATAL)
			pcie_do_recovery(pdev, pci_channel_io_normal,
					 PCIE_PORT_SERVICE_AER);
		else if (entry.severity == AER_FATAL)
			pcie_do_recovery(pdev, pci_channel_io_frozen,
					 PCIE_PORT_SERVICE_AER);
		pci_dev_put(pdev);
	}
}

/*
 * Mutual exclusion is needed only for writers of aer_recover_ring; the
 * single reader does not need a lock, and no locking is needed between
 * the reader and a writer.
 */
static DEFINE_SPINLOCK(aer_recover_ring_lock);
static DECLARE_WORK(aer_recover_work, aer_recover_work_func);
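
/*
 * aer_recover_queue() below is exported for the APEI/GHES code, which
 * queues firmware-reported AER records here instead of taking the native
 * aer_irq() path.  Illustrative call (argument names are placeholders):
 *
 *	aer_recover_queue(domain, bus, devfn,
 *			  cper_severity_to_aer(cper_sev), aer_regs);
 *
 * Entries are pushed under aer_recover_ring_lock and handled later in
 * process context by aer_recover_work_func().
 */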

void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
		       int severity, struct aer_capability_regs *aer_regs)
{
	struct aer_recover_entry entry = {
		.bus		= bus,
		.devfn		= devfn,
		.domain		= domain,
		.severity	= severity,
		.regs		= aer_regs,
	};

	if (kfifo_in_spinlocked(&aer_recover_ring, &entry, 1,
				&aer_recover_ring_lock))
		schedule_work(&aer_recover_work);
	else
		pr_err("AER recover: Buffer overflow when recovering AER for %04x:%02x:%02x:%x\n",
		       domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
}
EXPORT_SYMBOL_GPL(aer_recover_queue);
#endif

/**
 * aer_get_device_error_info - read error status from dev and store it to info
 * @dev: pointer to the device expected to have an error record
 * @info: pointer to structure to store the error record
 *
 * Return 1 on success, 0 on error.
 *
 * Note that @info is reused among all error devices. Clear fields properly.
 */
int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
{
	int pos, temp;

	/* Must reset in this function */
	info->status = 0;
	info->tlp_header_valid = 0;

	pos = dev->aer_cap;

	/* The device might not support AER */
	if (!pos)
		return 0;

	if (info->severity == AER_CORRECTABLE) {
		pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS,
			&info->status);
		pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK,
			&info->mask);
		if (!(info->status & ~info->mask))
			return 0;
	} else if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT ||
		   pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM ||
		   info->severity == AER_NONFATAL) {

		/* Link is still healthy for IO reads */
		pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS,
			&info->status);
		pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK,
			&info->mask);
		if (!(info->status & ~info->mask))
			return 0;

		/* Get First Error Pointer */
		pci_read_config_dword(dev, pos + PCI_ERR_CAP, &temp);
		info->first_error = PCI_ERR_CAP_FEP(temp);

		if (info->status & AER_LOG_TLP_MASKS) {
			info->tlp_header_valid = 1;
			pci_read_config_dword(dev,
				pos + PCI_ERR_HEADER_LOG, &info->tlp.dw0);
			pci_read_config_dword(dev,
				pos + PCI_ERR_HEADER_LOG + 4, &info->tlp.dw1);
			pci_read_config_dword(dev,
				pos + PCI_ERR_HEADER_LOG + 8, &info->tlp.dw2);
			pci_read_config_dword(dev,
				pos + PCI_ERR_HEADER_LOG + 12, &info->tlp.dw3);
		}
	}

	return 1;
}

static inline void aer_process_err_devices(struct aer_err_info *e_info)
{
	int i;

	/* Report all errors before handling them, so records are not lost to a reset etc. */
	for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
		if (aer_get_device_error_info(e_info->dev[i], e_info))
			aer_print_error(e_info->dev[i], e_info);
	}
	for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
		if (aer_get_device_error_info(e_info->dev[i], e_info))
			handle_error_source(e_info->dev[i], e_info);
	}
}

/**
 * aer_isr_one_error - consume an error detected by root port
 * @rpc: pointer to the root port which holds an error
 * @e_src: pointer to an error source
 */
static void aer_isr_one_error(struct aer_rpc *rpc,
			      struct aer_err_source *e_src)
{
	struct pci_dev *pdev = rpc->rpd;
	struct aer_err_info e_info;

	pci_rootport_aer_stats_incr(pdev, e_src);

	/*
	 * There is a possibility that both a correctable error and an
	 * uncorrectable error are logged. Report the correctable error first.
	 */
	if (e_src->status & PCI_ERR_ROOT_COR_RCV) {
		e_info.id = ERR_COR_ID(e_src->id);
		e_info.severity = AER_CORRECTABLE;

		if (e_src->status & PCI_ERR_ROOT_MULTI_COR_RCV)
			e_info.multi_error_valid = 1;
		else
			e_info.multi_error_valid = 0;
		aer_print_port_info(pdev, &e_info);

		if (find_source_device(pdev, &e_info))
			aer_process_err_devices(&e_info);
	}

	if (e_src->status & PCI_ERR_ROOT_UNCOR_RCV) {
		e_info.id = ERR_UNCOR_ID(e_src->id);

		if (e_src->status & PCI_ERR_ROOT_FATAL_RCV)
			e_info.severity = AER_FATAL;
		else
			e_info.severity = AER_NONFATAL;

		if (e_src->status & PCI_ERR_ROOT_MULTI_UNCOR_RCV)
			e_info.multi_error_valid = 1;
		else
			e_info.multi_error_valid = 0;

		aer_print_port_info(pdev, &e_info);

		if (find_source_device(pdev, &e_info))
			aer_process_err_devices(&e_info);
	}
}

/**
 * aer_isr - consume errors detected by root port
 * @irq: IRQ assigned to Root Port
 * @context: pointer to Root Port's pcie_device data structure
 *
 * Invoked as the threaded half of the IRQ handler when the Root Port
 * records a newly detected error.
 */
static irqreturn_t aer_isr(int irq, void *context)
{
	struct pcie_device *dev = (struct pcie_device *)context;
	struct aer_rpc *rpc = get_service_data(dev);
	struct aer_err_source uninitialized_var(e_src);

	if (kfifo_is_empty(&rpc->aer_fifo))
		return IRQ_NONE;

	while (kfifo_get(&rpc->aer_fifo, &e_src))
		aer_isr_one_error(rpc, &e_src);
	return IRQ_HANDLED;
}
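
/*
 * The interrupt is split across the two handlers registered with
 * devm_request_threaded_irq() in aer_probe(): aer_irq() runs in hard
 * interrupt context, snapshots the Root Error Status / Error Source ID
 * registers into the kfifo and clears the status, then returns
 * IRQ_WAKE_THREAD so that aer_isr() above can walk the hierarchy and run
 * recovery in sleepable context.
 */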

/**
 * aer_irq - Root Port's ISR
 * @irq: IRQ assigned to Root Port
 * @context: pointer to Root Port data structure
 *
 * Invoked when Root Port detects AER messages.
 */
static irqreturn_t aer_irq(int irq, void *context)
{
	struct pcie_device *pdev = (struct pcie_device *)context;
	struct aer_rpc *rpc = get_service_data(pdev);
	struct pci_dev *rp = rpc->rpd;
	struct aer_err_source e_src = {};
	int pos = rp->aer_cap;

	pci_read_config_dword(rp, pos + PCI_ERR_ROOT_STATUS, &e_src.status);
	if (!(e_src.status & (PCI_ERR_ROOT_UNCOR_RCV|PCI_ERR_ROOT_COR_RCV)))
		return IRQ_NONE;

	pci_read_config_dword(rp, pos + PCI_ERR_ROOT_ERR_SRC, &e_src.id);
	pci_write_config_dword(rp, pos + PCI_ERR_ROOT_STATUS, e_src.status);

	if (!kfifo_put(&rpc->aer_fifo, e_src))
		return IRQ_HANDLED;

	return IRQ_WAKE_THREAD;
}

static int set_device_error_reporting(struct pci_dev *dev, void *data)
{
	bool enable = *((bool *)data);
	int type = pci_pcie_type(dev);

	if ((type == PCI_EXP_TYPE_ROOT_PORT) ||
	    (type == PCI_EXP_TYPE_UPSTREAM) ||
	    (type == PCI_EXP_TYPE_DOWNSTREAM)) {
		if (enable)
			pci_enable_pcie_error_reporting(dev);
		else
			pci_disable_pcie_error_reporting(dev);
	}

	if (enable)
		pcie_set_ecrc_checking(dev);

	return 0;
}

/**
 * set_downstream_devices_error_reporting - enable/disable the error reporting bits on the root port and its downstream ports.
 * @dev: pointer to root port's pci_dev data structure
 * @enable: true = enable error reporting, false = disable error reporting.
 */
static void set_downstream_devices_error_reporting(struct pci_dev *dev,
						   bool enable)
{
	set_device_error_reporting(dev, &enable);

	if (!dev->subordinate)
		return;
	pci_walk_bus(dev->subordinate, set_device_error_reporting, &enable);
}

/**
 * aer_enable_rootport - enable Root Port's interrupts when receiving messages
 * @rpc: pointer to a Root Port data structure
 *
 * Invoked when PCIe bus loads AER service driver.
 */
static void aer_enable_rootport(struct aer_rpc *rpc)
{
	struct pci_dev *pdev = rpc->rpd;
	int aer_pos;
	u16 reg16;
	u32 reg32;

	/* Clear PCIe Capability's Device Status */
	pcie_capability_read_word(pdev, PCI_EXP_DEVSTA, &reg16);
	pcie_capability_write_word(pdev, PCI_EXP_DEVSTA, reg16);

	/* Disable system error generation in response to error messages */
	pcie_capability_clear_word(pdev, PCI_EXP_RTCTL,
				   SYSTEM_ERROR_INTR_ON_MESG_MASK);

	aer_pos = pdev->aer_cap;
	/* Clear error status */
	pci_read_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, &reg32);
	pci_write_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, reg32);
	pci_read_config_dword(pdev, aer_pos + PCI_ERR_COR_STATUS, &reg32);
	pci_write_config_dword(pdev, aer_pos + PCI_ERR_COR_STATUS, reg32);
	pci_read_config_dword(pdev, aer_pos + PCI_ERR_UNCOR_STATUS, &reg32);
	pci_write_config_dword(pdev, aer_pos + PCI_ERR_UNCOR_STATUS, reg32);

	/*
	 * Enable error reporting for the root port device and downstream port
1318 */ 1319 set_downstream_devices_error_reporting(pdev, true); 1320 1321 /* Enable Root Port's interrupt in response to error messages */ 1322 pci_read_config_dword(pdev, aer_pos + PCI_ERR_ROOT_COMMAND, ®32); 1323 reg32 |= ROOT_PORT_INTR_ON_MESG_MASK; 1324 pci_write_config_dword(pdev, aer_pos + PCI_ERR_ROOT_COMMAND, reg32); 1325 } 1326 1327 /** 1328 * aer_disable_rootport - disable Root Port's interrupts when receiving messages 1329 * @rpc: pointer to a Root Port data structure 1330 * 1331 * Invoked when PCIe bus unloads AER service driver. 1332 */ 1333 static void aer_disable_rootport(struct aer_rpc *rpc) 1334 { 1335 struct pci_dev *pdev = rpc->rpd; 1336 u32 reg32; 1337 int pos; 1338 1339 /* 1340 * Disable error reporting for the root port device and downstream port 1341 * devices. 1342 */ 1343 set_downstream_devices_error_reporting(pdev, false); 1344 1345 pos = pdev->aer_cap; 1346 /* Disable Root's interrupt in response to error messages */ 1347 pci_read_config_dword(pdev, pos + PCI_ERR_ROOT_COMMAND, ®32); 1348 reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK; 1349 pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_COMMAND, reg32); 1350 1351 /* Clear Root's error status reg */ 1352 pci_read_config_dword(pdev, pos + PCI_ERR_ROOT_STATUS, ®32); 1353 pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_STATUS, reg32); 1354 } 1355 1356 /** 1357 * aer_remove - clean up resources 1358 * @dev: pointer to the pcie_dev data structure 1359 * 1360 * Invoked when PCI Express bus unloads or AER probe fails. 1361 */ 1362 static void aer_remove(struct pcie_device *dev) 1363 { 1364 struct aer_rpc *rpc = get_service_data(dev); 1365 1366 aer_disable_rootport(rpc); 1367 } 1368 1369 /** 1370 * aer_probe - initialize resources 1371 * @dev: pointer to the pcie_dev data structure 1372 * 1373 * Invoked when PCI Express bus loads AER service driver. 1374 */ 1375 static int aer_probe(struct pcie_device *dev) 1376 { 1377 int status; 1378 struct aer_rpc *rpc; 1379 struct device *device = &dev->device; 1380 1381 rpc = devm_kzalloc(device, sizeof(struct aer_rpc), GFP_KERNEL); 1382 if (!rpc) { 1383 dev_printk(KERN_DEBUG, device, "alloc AER rpc failed\n"); 1384 return -ENOMEM; 1385 } 1386 rpc->rpd = dev->port; 1387 set_service_data(dev, rpc); 1388 1389 status = devm_request_threaded_irq(device, dev->irq, aer_irq, aer_isr, 1390 IRQF_SHARED, "aerdrv", dev); 1391 if (status) { 1392 dev_printk(KERN_DEBUG, device, "request AER IRQ %d failed\n", 1393 dev->irq); 1394 return status; 1395 } 1396 1397 aer_enable_rootport(rpc); 1398 dev_info(device, "AER enabled with IRQ %d\n", dev->irq); 1399 return 0; 1400 } 1401 1402 /** 1403 * aer_root_reset - reset link on Root Port 1404 * @dev: pointer to Root Port's pci_dev data structure 1405 * 1406 * Invoked by Port Bus driver when performing link reset at Root Port. 

/**
 * aer_root_reset - reset link on Root Port
 * @dev: pointer to Root Port's pci_dev data structure
 *
 * Invoked by Port Bus driver when performing link reset at Root Port.
 */
static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
{
	u32 reg32;
	int pos;
	int rc;

	pos = dev->aer_cap;

	/* Disable Root's interrupt in response to error messages */
	pci_read_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, &reg32);
	reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK;
	pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32);

	rc = pci_bus_error_reset(dev);
	pci_printk(KERN_DEBUG, dev, "Root Port link has been reset\n");

	/* Clear Root Error Status */
	pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &reg32);
	pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, reg32);

	/* Enable Root Port's interrupt in response to error messages */
	pci_read_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, &reg32);
	reg32 |= ROOT_PORT_INTR_ON_MESG_MASK;
	pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32);

	return rc ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}

static struct pcie_port_service_driver aerdriver = {
	.name		= "aer",
	.port_type	= PCI_EXP_TYPE_ROOT_PORT,
	.service	= PCIE_PORT_SERVICE_AER,

	.probe		= aer_probe,
	.remove		= aer_remove,
	.reset_link	= aer_root_reset,
};

/**
 * pcie_aer_init - register AER root service driver
 *
 * Invoked when AER root service driver is loaded.
 */
int __init pcie_aer_init(void)
{
	if (!pci_aer_available() || aer_acpi_firmware_first())
		return -ENXIO;
	return pcie_port_service_register(&aerdriver);
}