// SPDX-License-Identifier: GPL-2.0
/*
 * Implement the AER root port service driver. The driver registers an IRQ
 * handler. When a root port triggers an AER interrupt, the IRQ handler
 * collects root port status and schedules work.
 *
 * Copyright (C) 2006 Intel Corp.
 *	Tom Long Nguyen (tom.l.nguyen@intel.com)
 *	Zhang Yanmin (yanmin.zhang@intel.com)
 *
 * (C) Copyright 2009 Hewlett-Packard Development Company, L.P.
 *	Andrew Patterson <andrew.patterson@hp.com>
 */

#define pr_fmt(fmt) "AER: " fmt
#define dev_fmt pr_fmt

#include <linux/bitops.h>
#include <linux/cper.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pm.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/kfifo.h>
#include <linux/slab.h>
#include <acpi/apei.h>
#include <ras/ras_event.h>

#include "../pci.h"
#include "portdrv.h"

/* Capacity of the per-Root-Port kfifo of pending error sources */
#define AER_ERROR_SOURCES_MAX		128

#define AER_MAX_TYPEOF_COR_ERRS		16	/* as per PCI_ERR_COR_STATUS */
#define AER_MAX_TYPEOF_UNCOR_ERRS	27	/* as per PCI_ERR_UNCOR_STATUS*/

/* One error source reported by a Root Port: Root Error Status + Error
 * Source Identification register contents. */
struct aer_err_source {
	unsigned int status;
	unsigned int id;
};

struct aer_rpc {
	struct pci_dev *rpd;		/* Root Port device */
	DECLARE_KFIFO(aer_fifo, struct aer_err_source, AER_ERROR_SOURCES_MAX);
};

/* AER stats for the device */
struct aer_stats {

	/*
	 * Fields for all AER capable devices. They indicate the errors
	 * "as seen by this device". Note that this may mean that if an
	 * end point is causing problems, the AER counters may increment
	 * at its link partner (e.g. root port) because the errors will be
	 * "seen" by the link partner and not the problematic end point
	 * itself (which may report all counters as 0 as it never saw any
	 * problems).
63 */ 64 /* Counters for different type of correctable errors */ 65 u64 dev_cor_errs[AER_MAX_TYPEOF_COR_ERRS]; 66 /* Counters for different type of fatal uncorrectable errors */ 67 u64 dev_fatal_errs[AER_MAX_TYPEOF_UNCOR_ERRS]; 68 /* Counters for different type of nonfatal uncorrectable errors */ 69 u64 dev_nonfatal_errs[AER_MAX_TYPEOF_UNCOR_ERRS]; 70 /* Total number of ERR_COR sent by this device */ 71 u64 dev_total_cor_errs; 72 /* Total number of ERR_FATAL sent by this device */ 73 u64 dev_total_fatal_errs; 74 /* Total number of ERR_NONFATAL sent by this device */ 75 u64 dev_total_nonfatal_errs; 76 77 /* 78 * Fields for Root ports & root complex event collectors only, these 79 * indicate the total number of ERR_COR, ERR_FATAL, and ERR_NONFATAL 80 * messages received by the root port / event collector, INCLUDING the 81 * ones that are generated internally (by the rootport itself) 82 */ 83 u64 rootport_total_cor_errs; 84 u64 rootport_total_fatal_errs; 85 u64 rootport_total_nonfatal_errs; 86 }; 87 88 #define AER_LOG_TLP_MASKS (PCI_ERR_UNC_POISON_TLP| \ 89 PCI_ERR_UNC_ECRC| \ 90 PCI_ERR_UNC_UNSUP| \ 91 PCI_ERR_UNC_COMP_ABORT| \ 92 PCI_ERR_UNC_UNX_COMP| \ 93 PCI_ERR_UNC_MALF_TLP) 94 95 #define SYSTEM_ERROR_INTR_ON_MESG_MASK (PCI_EXP_RTCTL_SECEE| \ 96 PCI_EXP_RTCTL_SENFEE| \ 97 PCI_EXP_RTCTL_SEFEE) 98 #define ROOT_PORT_INTR_ON_MESG_MASK (PCI_ERR_ROOT_CMD_COR_EN| \ 99 PCI_ERR_ROOT_CMD_NONFATAL_EN| \ 100 PCI_ERR_ROOT_CMD_FATAL_EN) 101 #define ERR_COR_ID(d) (d & 0xffff) 102 #define ERR_UNCOR_ID(d) (d >> 16) 103 104 static int pcie_aer_disable; 105 106 void pci_no_aer(void) 107 { 108 pcie_aer_disable = 1; 109 } 110 111 bool pci_aer_available(void) 112 { 113 return !pcie_aer_disable && pci_msi_enabled(); 114 } 115 116 #ifdef CONFIG_PCIE_ECRC 117 118 #define ECRC_POLICY_DEFAULT 0 /* ECRC set by BIOS */ 119 #define ECRC_POLICY_OFF 1 /* ECRC off for performance */ 120 #define ECRC_POLICY_ON 2 /* ECRC on for data integrity */ 121 122 static int ecrc_policy = 
ECRC_POLICY_DEFAULT;

static const char * const ecrc_policy_str[] = {
	[ECRC_POLICY_DEFAULT] = "bios",
	[ECRC_POLICY_OFF] = "off",
	[ECRC_POLICY_ON] = "on"
};

/**
 * enable_ecrc_checking - enable PCIe ECRC checking for a device
 * @dev: the PCI device
 *
 * Returns 0 on success, or negative on failure.
 */
static int enable_ecrc_checking(struct pci_dev *dev)
{
	int pos;
	u32 reg32;

	if (!pci_is_pcie(dev))
		return -ENODEV;

	pos = dev->aer_cap;
	if (!pos)
		return -ENODEV;

	/*
	 * Only set each enable bit when the device advertises the
	 * corresponding ECRC generation/checking capability.
	 */
	pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
	if (reg32 & PCI_ERR_CAP_ECRC_GENC)
		reg32 |= PCI_ERR_CAP_ECRC_GENE;
	if (reg32 & PCI_ERR_CAP_ECRC_CHKC)
		reg32 |= PCI_ERR_CAP_ECRC_CHKE;
	pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);

	return 0;
}

/**
 * disable_ecrc_checking - disables PCIe ECRC checking for a device
 * @dev: the PCI device
 *
 * Returns 0 on success, or negative on failure.
 */
static int disable_ecrc_checking(struct pci_dev *dev)
{
	int pos;
	u32 reg32;

	if (!pci_is_pcie(dev))
		return -ENODEV;

	pos = dev->aer_cap;
	if (!pos)
		return -ENODEV;

	/* Clear both the ECRC generation and checking enables */
	pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
	reg32 &= ~(PCI_ERR_CAP_ECRC_GENE | PCI_ERR_CAP_ECRC_CHKE);
	pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);

	return 0;
}

/**
 * pcie_set_ecrc_checking - set/unset PCIe ECRC checking for a device based on global policy
 * @dev: the PCI device
 */
void pcie_set_ecrc_checking(struct pci_dev *dev)
{
	switch (ecrc_policy) {
	case ECRC_POLICY_DEFAULT:
		return;
	case ECRC_POLICY_OFF:
		disable_ecrc_checking(dev);
		break;
	case ECRC_POLICY_ON:
		enable_ecrc_checking(dev);
		break;
	default:
		return;
	}
}

/**
 * pcie_ecrc_get_policy - parse kernel command-line ecrc option
 * @str: ECRC policy from kernel command line to use
 */
void pcie_ecrc_get_policy(char *str)
{
	int i;

	i = match_string(ecrc_policy_str, ARRAY_SIZE(ecrc_policy_str), str);
	if (i < 0)
		return;

	ecrc_policy = i;
}
#endif /* CONFIG_PCIE_ECRC */

#ifdef CONFIG_ACPI_APEI
/* Match a HEST AER source entry against a specific PCI device by
 * segment, bus, device, and function. */
static inline int hest_match_pci(struct acpi_hest_aer_common *p,
				 struct pci_dev *pci)
{
	return ACPI_HEST_SEGMENT(p->bus) == pci_domain_nr(pci->bus) &&
	       ACPI_HEST_BUS(p->bus) == pci->bus->number &&
	       p->device == PCI_SLOT(pci->devfn) &&
	       p->function == PCI_FUNC(pci->devfn);
}

/* Does a HEST entry's source type apply to this kind of PCIe device? */
static inline bool hest_match_type(struct acpi_hest_header *hest_hdr,
				   struct pci_dev *dev)
{
	u16 hest_type = hest_hdr->type;
	u8 pcie_type = pci_pcie_type(dev);

	if ((hest_type == ACPI_HEST_TYPE_AER_ROOT_PORT &&
	     pcie_type == PCI_EXP_TYPE_ROOT_PORT) ||
	    (hest_type == ACPI_HEST_TYPE_AER_ENDPOINT &&
	     pcie_type == PCI_EXP_TYPE_ENDPOINT) ||
	    (hest_type == ACPI_HEST_TYPE_AER_BRIDGE &&
(dev->class >> 16) == PCI_BASE_CLASS_BRIDGE)) 241 return true; 242 return false; 243 } 244 245 struct aer_hest_parse_info { 246 struct pci_dev *pci_dev; 247 int firmware_first; 248 }; 249 250 static int hest_source_is_pcie_aer(struct acpi_hest_header *hest_hdr) 251 { 252 if (hest_hdr->type == ACPI_HEST_TYPE_AER_ROOT_PORT || 253 hest_hdr->type == ACPI_HEST_TYPE_AER_ENDPOINT || 254 hest_hdr->type == ACPI_HEST_TYPE_AER_BRIDGE) 255 return 1; 256 return 0; 257 } 258 259 static int aer_hest_parse(struct acpi_hest_header *hest_hdr, void *data) 260 { 261 struct aer_hest_parse_info *info = data; 262 struct acpi_hest_aer_common *p; 263 int ff; 264 265 if (!hest_source_is_pcie_aer(hest_hdr)) 266 return 0; 267 268 p = (struct acpi_hest_aer_common *)(hest_hdr + 1); 269 ff = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST); 270 271 /* 272 * If no specific device is supplied, determine whether 273 * FIRMWARE_FIRST is set for *any* PCIe device. 274 */ 275 if (!info->pci_dev) { 276 info->firmware_first |= ff; 277 return 0; 278 } 279 280 /* Otherwise, check the specific device */ 281 if (p->flags & ACPI_HEST_GLOBAL) { 282 if (hest_match_type(hest_hdr, info->pci_dev)) 283 info->firmware_first = ff; 284 } else 285 if (hest_match_pci(p, info->pci_dev)) 286 info->firmware_first = ff; 287 288 return 0; 289 } 290 291 static void aer_set_firmware_first(struct pci_dev *pci_dev) 292 { 293 int rc; 294 struct aer_hest_parse_info info = { 295 .pci_dev = pci_dev, 296 .firmware_first = 0, 297 }; 298 299 rc = apei_hest_parse(aer_hest_parse, &info); 300 301 if (rc) 302 pci_dev->__aer_firmware_first = 0; 303 else 304 pci_dev->__aer_firmware_first = info.firmware_first; 305 pci_dev->__aer_firmware_first_valid = 1; 306 } 307 308 int pcie_aer_get_firmware_first(struct pci_dev *dev) 309 { 310 if (!pci_is_pcie(dev)) 311 return 0; 312 313 if (pcie_ports_native) 314 return 0; 315 316 if (!dev->__aer_firmware_first_valid) 317 aer_set_firmware_first(dev); 318 return dev->__aer_firmware_first; 319 } 320 321 static 
bool aer_firmware_first; 322 323 /** 324 * aer_acpi_firmware_first - Check if APEI should control AER. 325 */ 326 bool aer_acpi_firmware_first(void) 327 { 328 static bool parsed = false; 329 struct aer_hest_parse_info info = { 330 .pci_dev = NULL, /* Check all PCIe devices */ 331 .firmware_first = 0, 332 }; 333 334 if (pcie_ports_native) 335 return false; 336 337 if (!parsed) { 338 apei_hest_parse(aer_hest_parse, &info); 339 aer_firmware_first = info.firmware_first; 340 parsed = true; 341 } 342 return aer_firmware_first; 343 } 344 #endif 345 346 #define PCI_EXP_AER_FLAGS (PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE | \ 347 PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE) 348 349 int pci_enable_pcie_error_reporting(struct pci_dev *dev) 350 { 351 if (pcie_aer_get_firmware_first(dev)) 352 return -EIO; 353 354 if (!dev->aer_cap) 355 return -EIO; 356 357 return pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_AER_FLAGS); 358 } 359 EXPORT_SYMBOL_GPL(pci_enable_pcie_error_reporting); 360 361 int pci_disable_pcie_error_reporting(struct pci_dev *dev) 362 { 363 if (pcie_aer_get_firmware_first(dev)) 364 return -EIO; 365 366 return pcie_capability_clear_word(dev, PCI_EXP_DEVCTL, 367 PCI_EXP_AER_FLAGS); 368 } 369 EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting); 370 371 void pci_aer_clear_device_status(struct pci_dev *dev) 372 { 373 u16 sta; 374 375 pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &sta); 376 pcie_capability_write_word(dev, PCI_EXP_DEVSTA, sta); 377 } 378 379 int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev) 380 { 381 int pos; 382 u32 status, sev; 383 384 pos = dev->aer_cap; 385 if (!pos) 386 return -EIO; 387 388 if (pcie_aer_get_firmware_first(dev)) 389 return -EIO; 390 391 /* Clear status bits for ERR_NONFATAL errors only */ 392 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status); 393 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &sev); 394 status &= ~sev; 395 if (status) 396 pci_write_config_dword(dev, pos + 
PCI_ERR_UNCOR_STATUS, status); 397 398 return 0; 399 } 400 EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status); 401 402 void pci_aer_clear_fatal_status(struct pci_dev *dev) 403 { 404 int pos; 405 u32 status, sev; 406 407 pos = dev->aer_cap; 408 if (!pos) 409 return; 410 411 if (pcie_aer_get_firmware_first(dev)) 412 return; 413 414 /* Clear status bits for ERR_FATAL errors only */ 415 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status); 416 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &sev); 417 status &= sev; 418 if (status) 419 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status); 420 } 421 422 int pci_cleanup_aer_error_status_regs(struct pci_dev *dev) 423 { 424 int pos; 425 u32 status; 426 int port_type; 427 428 if (!pci_is_pcie(dev)) 429 return -ENODEV; 430 431 pos = dev->aer_cap; 432 if (!pos) 433 return -EIO; 434 435 if (pcie_aer_get_firmware_first(dev)) 436 return -EIO; 437 438 port_type = pci_pcie_type(dev); 439 if (port_type == PCI_EXP_TYPE_ROOT_PORT) { 440 pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &status); 441 pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, status); 442 } 443 444 pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &status); 445 pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, status); 446 447 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status); 448 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status); 449 450 return 0; 451 } 452 453 void pci_save_aer_state(struct pci_dev *dev) 454 { 455 struct pci_cap_saved_state *save_state; 456 u32 *cap; 457 int pos; 458 459 pos = dev->aer_cap; 460 if (!pos) 461 return; 462 463 save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_ERR); 464 if (!save_state) 465 return; 466 467 cap = &save_state->cap.data[0]; 468 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, cap++); 469 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, cap++); 470 pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, cap++); 471 
pci_read_config_dword(dev, pos + PCI_ERR_CAP, cap++); 472 if (pcie_cap_has_rtctl(dev)) 473 pci_read_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, cap++); 474 } 475 476 void pci_restore_aer_state(struct pci_dev *dev) 477 { 478 struct pci_cap_saved_state *save_state; 479 u32 *cap; 480 int pos; 481 482 pos = dev->aer_cap; 483 if (!pos) 484 return; 485 486 save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_ERR); 487 if (!save_state) 488 return; 489 490 cap = &save_state->cap.data[0]; 491 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, *cap++); 492 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, *cap++); 493 pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, *cap++); 494 pci_write_config_dword(dev, pos + PCI_ERR_CAP, *cap++); 495 if (pcie_cap_has_rtctl(dev)) 496 pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, *cap++); 497 } 498 499 void pci_aer_init(struct pci_dev *dev) 500 { 501 int n; 502 503 dev->aer_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); 504 if (!dev->aer_cap) 505 return; 506 507 dev->aer_stats = kzalloc(sizeof(struct aer_stats), GFP_KERNEL); 508 509 /* 510 * We save/restore PCI_ERR_UNCOR_MASK, PCI_ERR_UNCOR_SEVER, 511 * PCI_ERR_COR_MASK, and PCI_ERR_CAP. Root and Root Complex Event 512 * Collectors also implement PCI_ERR_ROOT_COMMAND (PCIe r5.0, sec 513 * 7.8.4). 514 */ 515 n = pcie_cap_has_rtctl(dev) ? 5 : 4; 516 pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_ERR, sizeof(u32) * n); 517 518 pci_cleanup_aer_error_status_regs(dev); 519 } 520 521 void pci_aer_exit(struct pci_dev *dev) 522 { 523 kfree(dev->aer_stats); 524 dev->aer_stats = NULL; 525 } 526 527 #define AER_AGENT_RECEIVER 0 528 #define AER_AGENT_REQUESTER 1 529 #define AER_AGENT_COMPLETER 2 530 #define AER_AGENT_TRANSMITTER 3 531 532 #define AER_AGENT_REQUESTER_MASK(t) ((t == AER_CORRECTABLE) ? \ 533 0 : (PCI_ERR_UNC_COMP_TIME|PCI_ERR_UNC_UNSUP)) 534 #define AER_AGENT_COMPLETER_MASK(t) ((t == AER_CORRECTABLE) ? 
\ 535 0 : PCI_ERR_UNC_COMP_ABORT) 536 #define AER_AGENT_TRANSMITTER_MASK(t) ((t == AER_CORRECTABLE) ? \ 537 (PCI_ERR_COR_REP_ROLL|PCI_ERR_COR_REP_TIMER) : 0) 538 539 #define AER_GET_AGENT(t, e) \ 540 ((e & AER_AGENT_COMPLETER_MASK(t)) ? AER_AGENT_COMPLETER : \ 541 (e & AER_AGENT_REQUESTER_MASK(t)) ? AER_AGENT_REQUESTER : \ 542 (e & AER_AGENT_TRANSMITTER_MASK(t)) ? AER_AGENT_TRANSMITTER : \ 543 AER_AGENT_RECEIVER) 544 545 #define AER_PHYSICAL_LAYER_ERROR 0 546 #define AER_DATA_LINK_LAYER_ERROR 1 547 #define AER_TRANSACTION_LAYER_ERROR 2 548 549 #define AER_PHYSICAL_LAYER_ERROR_MASK(t) ((t == AER_CORRECTABLE) ? \ 550 PCI_ERR_COR_RCVR : 0) 551 #define AER_DATA_LINK_LAYER_ERROR_MASK(t) ((t == AER_CORRECTABLE) ? \ 552 (PCI_ERR_COR_BAD_TLP| \ 553 PCI_ERR_COR_BAD_DLLP| \ 554 PCI_ERR_COR_REP_ROLL| \ 555 PCI_ERR_COR_REP_TIMER) : PCI_ERR_UNC_DLP) 556 557 #define AER_GET_LAYER_ERROR(t, e) \ 558 ((e & AER_PHYSICAL_LAYER_ERROR_MASK(t)) ? AER_PHYSICAL_LAYER_ERROR : \ 559 (e & AER_DATA_LINK_LAYER_ERROR_MASK(t)) ? 
AER_DATA_LINK_LAYER_ERROR : \ 560 AER_TRANSACTION_LAYER_ERROR) 561 562 /* 563 * AER error strings 564 */ 565 static const char *aer_error_severity_string[] = { 566 "Uncorrected (Non-Fatal)", 567 "Uncorrected (Fatal)", 568 "Corrected" 569 }; 570 571 static const char *aer_error_layer[] = { 572 "Physical Layer", 573 "Data Link Layer", 574 "Transaction Layer" 575 }; 576 577 static const char *aer_correctable_error_string[AER_MAX_TYPEOF_COR_ERRS] = { 578 "RxErr", /* Bit Position 0 */ 579 NULL, 580 NULL, 581 NULL, 582 NULL, 583 NULL, 584 "BadTLP", /* Bit Position 6 */ 585 "BadDLLP", /* Bit Position 7 */ 586 "Rollover", /* Bit Position 8 */ 587 NULL, 588 NULL, 589 NULL, 590 "Timeout", /* Bit Position 12 */ 591 "NonFatalErr", /* Bit Position 13 */ 592 "CorrIntErr", /* Bit Position 14 */ 593 "HeaderOF", /* Bit Position 15 */ 594 }; 595 596 static const char *aer_uncorrectable_error_string[AER_MAX_TYPEOF_UNCOR_ERRS] = { 597 "Undefined", /* Bit Position 0 */ 598 NULL, 599 NULL, 600 NULL, 601 "DLP", /* Bit Position 4 */ 602 "SDES", /* Bit Position 5 */ 603 NULL, 604 NULL, 605 NULL, 606 NULL, 607 NULL, 608 NULL, 609 "TLP", /* Bit Position 12 */ 610 "FCP", /* Bit Position 13 */ 611 "CmpltTO", /* Bit Position 14 */ 612 "CmpltAbrt", /* Bit Position 15 */ 613 "UnxCmplt", /* Bit Position 16 */ 614 "RxOF", /* Bit Position 17 */ 615 "MalfTLP", /* Bit Position 18 */ 616 "ECRC", /* Bit Position 19 */ 617 "UnsupReq", /* Bit Position 20 */ 618 "ACSViol", /* Bit Position 21 */ 619 "UncorrIntErr", /* Bit Position 22 */ 620 "BlockedTLP", /* Bit Position 23 */ 621 "AtomicOpBlocked", /* Bit Position 24 */ 622 "TLPBlockedErr", /* Bit Position 25 */ 623 "PoisonTLPBlocked", /* Bit Position 26 */ 624 }; 625 626 static const char *aer_agent_string[] = { 627 "Receiver ID", 628 "Requester ID", 629 "Completer ID", 630 "Transmitter ID" 631 }; 632 633 #define aer_stats_dev_attr(name, stats_array, strings_array, \ 634 total_string, total_field) \ 635 static ssize_t \ 636 name##_show(struct device *dev, 
struct device_attribute *attr, \ 637 char *buf) \ 638 { \ 639 unsigned int i; \ 640 char *str = buf; \ 641 struct pci_dev *pdev = to_pci_dev(dev); \ 642 u64 *stats = pdev->aer_stats->stats_array; \ 643 \ 644 for (i = 0; i < ARRAY_SIZE(strings_array); i++) { \ 645 if (strings_array[i]) \ 646 str += sprintf(str, "%s %llu\n", \ 647 strings_array[i], stats[i]); \ 648 else if (stats[i]) \ 649 str += sprintf(str, #stats_array "_bit[%d] %llu\n",\ 650 i, stats[i]); \ 651 } \ 652 str += sprintf(str, "TOTAL_%s %llu\n", total_string, \ 653 pdev->aer_stats->total_field); \ 654 return str-buf; \ 655 } \ 656 static DEVICE_ATTR_RO(name) 657 658 aer_stats_dev_attr(aer_dev_correctable, dev_cor_errs, 659 aer_correctable_error_string, "ERR_COR", 660 dev_total_cor_errs); 661 aer_stats_dev_attr(aer_dev_fatal, dev_fatal_errs, 662 aer_uncorrectable_error_string, "ERR_FATAL", 663 dev_total_fatal_errs); 664 aer_stats_dev_attr(aer_dev_nonfatal, dev_nonfatal_errs, 665 aer_uncorrectable_error_string, "ERR_NONFATAL", 666 dev_total_nonfatal_errs); 667 668 #define aer_stats_rootport_attr(name, field) \ 669 static ssize_t \ 670 name##_show(struct device *dev, struct device_attribute *attr, \ 671 char *buf) \ 672 { \ 673 struct pci_dev *pdev = to_pci_dev(dev); \ 674 return sprintf(buf, "%llu\n", pdev->aer_stats->field); \ 675 } \ 676 static DEVICE_ATTR_RO(name) 677 678 aer_stats_rootport_attr(aer_rootport_total_err_cor, 679 rootport_total_cor_errs); 680 aer_stats_rootport_attr(aer_rootport_total_err_fatal, 681 rootport_total_fatal_errs); 682 aer_stats_rootport_attr(aer_rootport_total_err_nonfatal, 683 rootport_total_nonfatal_errs); 684 685 static struct attribute *aer_stats_attrs[] __ro_after_init = { 686 &dev_attr_aer_dev_correctable.attr, 687 &dev_attr_aer_dev_fatal.attr, 688 &dev_attr_aer_dev_nonfatal.attr, 689 &dev_attr_aer_rootport_total_err_cor.attr, 690 &dev_attr_aer_rootport_total_err_fatal.attr, 691 &dev_attr_aer_rootport_total_err_nonfatal.attr, 692 NULL 693 }; 694 695 static umode_t 
aer_stats_attrs_are_visible(struct kobject *kobj,
			    struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pci_dev *pdev = to_pci_dev(dev);

	/* No AER stats allocated (no AER capability): hide everything */
	if (!pdev->aer_stats)
		return 0;

	/* The rootport_* totals are only meaningful on Root Ports */
	if ((a == &dev_attr_aer_rootport_total_err_cor.attr ||
	     a == &dev_attr_aer_rootport_total_err_fatal.attr ||
	     a == &dev_attr_aer_rootport_total_err_nonfatal.attr) &&
	    pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT)
		return 0;

	return a->mode;
}

const struct attribute_group aer_stats_attr_group = {
	.attrs = aer_stats_attrs,
	.is_visible = aer_stats_attrs_are_visible,
};

/*
 * Account one device-reported error in the per-device statistics: bump
 * the severity total and each per-bit counter for unmasked status bits.
 */
static void pci_dev_aer_stats_incr(struct pci_dev *pdev,
				   struct aer_err_info *info)
{
	unsigned long status = info->status & ~info->mask;
	int i, max = -1;
	u64 *counter = NULL;
	struct aer_stats *aer_stats = pdev->aer_stats;

	if (!aer_stats)
		return;

	switch (info->severity) {
	case AER_CORRECTABLE:
		aer_stats->dev_total_cor_errs++;
		counter = &aer_stats->dev_cor_errs[0];
		max = AER_MAX_TYPEOF_COR_ERRS;
		break;
	case AER_NONFATAL:
		aer_stats->dev_total_nonfatal_errs++;
		counter = &aer_stats->dev_nonfatal_errs[0];
		max = AER_MAX_TYPEOF_UNCOR_ERRS;
		break;
	case AER_FATAL:
		aer_stats->dev_total_fatal_errs++;
		counter = &aer_stats->dev_fatal_errs[0];
		max = AER_MAX_TYPEOF_UNCOR_ERRS;
		break;
	}

	/* One counter per set, unmasked status bit */
	for_each_set_bit(i, &status, max)
		counter[i]++;
}

/*
 * Account an error message received by a Root Port (from the Root Error
 * Status register image in @e_src) in the rootport_* totals.
 */
static void pci_rootport_aer_stats_incr(struct pci_dev *pdev,
					struct aer_err_source *e_src)
{
	struct aer_stats *aer_stats = pdev->aer_stats;

	if (!aer_stats)
		return;

	if (e_src->status & PCI_ERR_ROOT_COR_RCV)
		aer_stats->rootport_total_cor_errs++;

	if (e_src->status & PCI_ERR_ROOT_UNCOR_RCV) {
		if (e_src->status & PCI_ERR_ROOT_FATAL_RCV)
			aer_stats->rootport_total_fatal_errs++;
		else
			aer_stats->rootport_total_nonfatal_errs++;
	}
}

/* Dump the four logged TLP header DWORDs of an error */
static void __print_tlp_header(struct pci_dev *dev,
			       struct aer_header_log_regs *t)
{
	pci_err(dev, " TLP Header: %08x %08x %08x %08x\n",
		t->dw0, t->dw1, t->dw2, t->dw3);
}

/*
 * Print one line per set, unmasked status bit (with its symbolic name
 * when known) and fold the error into the per-device statistics.
 */
static void __aer_print_error(struct pci_dev *dev,
			      struct aer_err_info *info)
{
	unsigned long status = info->status & ~info->mask;
	const char *errmsg = NULL;
	int i;

	for_each_set_bit(i, &status, 32) {
		if (info->severity == AER_CORRECTABLE)
			errmsg = i < ARRAY_SIZE(aer_correctable_error_string) ?
				aer_correctable_error_string[i] : NULL;
		else
			errmsg = i < ARRAY_SIZE(aer_uncorrectable_error_string) ?
				aer_uncorrectable_error_string[i] : NULL;

		if (errmsg)
			pci_err(dev, " [%2d] %-22s%s\n", i, errmsg,
				info->first_error == i ? " (First)" : "");
		else
			pci_err(dev, " [%2d] Unknown Error Bit%s\n",
				i, info->first_error == i ? " (First)" : "");
	}
	pci_dev_aer_stats_incr(dev, info);
}

/* Log a decoded AER error (severity, layer, agent, per-bit detail, TLP
 * header when valid) and emit the corresponding trace event. */
void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
{
	int layer, agent;
	int id = ((dev->bus->number << 8) | dev->devfn);

	if (!info->status) {
		/* Nothing readable in the status register (e.g. link down) */
		pci_err(dev, "PCIe Bus Error: severity=%s, type=Inaccessible, (Unregistered Agent ID)\n",
			aer_error_severity_string[info->severity]);
		goto out;
	}

	layer = AER_GET_LAYER_ERROR(info->severity, info->status);
	agent = AER_GET_AGENT(info->severity, info->status);

	pci_err(dev, "PCIe Bus Error: severity=%s, type=%s, (%s)\n",
		aer_error_severity_string[info->severity],
		aer_error_layer[layer], aer_agent_string[agent]);

	pci_err(dev, " device [%04x:%04x] error status/mask=%08x/%08x\n",
		dev->vendor, dev->device,
		info->status, info->mask);

	__aer_print_error(dev, info);

	if (info->tlp_header_valid)
		__print_tlp_header(dev, &info->tlp);

out:
	if (info->id && info->error_dev_num > 1 && info->id == id)
		pci_err(dev, " Error of this Agent is reported first\n");

	trace_aer_event(dev_name(&dev->dev), (info->status & ~info->mask),
			info->severity, info->tlp_header_valid, &info->tlp);
}

/* One-line summary of the error source ID a Root Port reported */
static void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info)
{
	u8 bus = info->id >> 8;
	u8 devfn = info->id & 0xff;

	pci_info(dev, "%s%s error received: %04x:%02x:%02x.%d\n",
		 info->multi_error_valid ? "Multiple " : "",
		 aer_error_severity_string[info->severity],
		 pci_domain_nr(dev->bus), bus, PCI_SLOT(devfn),
		 PCI_FUNC(devfn));
}

#ifdef CONFIG_ACPI_APEI_PCIEAER
/* Map a CPER severity to the corresponding AER severity */
int cper_severity_to_aer(int cper_severity)
{
	switch (cper_severity) {
	case CPER_SEV_RECOVERABLE:
		return AER_NONFATAL;
	case CPER_SEV_FATAL:
		return AER_FATAL;
	default:
		return AER_CORRECTABLE;
	}
}
EXPORT_SYMBOL_GPL(cper_severity_to_aer);

/* Decode and log an AER error delivered via an APEI/CPER record rather
 * than read from the device's own config space. */
void cper_print_aer(struct pci_dev *dev, int aer_severity,
		    struct aer_capability_regs *aer)
{
	int layer, agent, tlp_header_valid = 0;
	u32 status, mask;
	struct aer_err_info info;

	if (aer_severity == AER_CORRECTABLE) {
		status = aer->cor_status;
		mask = aer->cor_mask;
	} else {
		status = aer->uncor_status;
		mask = aer->uncor_mask;
		tlp_header_valid = status & AER_LOG_TLP_MASKS;
	}

	layer = AER_GET_LAYER_ERROR(aer_severity, status);
	agent = AER_GET_AGENT(aer_severity, status);

	memset(&info, 0, sizeof(info));
	info.severity = aer_severity;
	info.status = status;
	info.mask = mask;
	info.first_error = PCI_ERR_CAP_FEP(aer->cap_control);

	pci_err(dev, "aer_status: 0x%08x, aer_mask: 0x%08x\n", status, mask);
	__aer_print_error(dev, &info);
	pci_err(dev, "aer_layer=%s, aer_agent=%s\n",
		aer_error_layer[layer], aer_agent_string[agent]);

	if (aer_severity != AER_CORRECTABLE)
		pci_err(dev, "aer_uncor_severity: 0x%08x\n",
			aer->uncor_severity);

	if (tlp_header_valid)
		__print_tlp_header(dev,
&aer->header_log); 899 900 trace_aer_event(dev_name(&dev->dev), (status & ~mask), 901 aer_severity, tlp_header_valid, &aer->header_log); 902 } 903 #endif 904 905 /** 906 * add_error_device - list device to be handled 907 * @e_info: pointer to error info 908 * @dev: pointer to pci_dev to be added 909 */ 910 static int add_error_device(struct aer_err_info *e_info, struct pci_dev *dev) 911 { 912 if (e_info->error_dev_num < AER_MAX_MULTI_ERR_DEVICES) { 913 e_info->dev[e_info->error_dev_num] = pci_dev_get(dev); 914 e_info->error_dev_num++; 915 return 0; 916 } 917 return -ENOSPC; 918 } 919 920 /** 921 * is_error_source - check whether the device is source of reported error 922 * @dev: pointer to pci_dev to be checked 923 * @e_info: pointer to reported error info 924 */ 925 static bool is_error_source(struct pci_dev *dev, struct aer_err_info *e_info) 926 { 927 int pos; 928 u32 status, mask; 929 u16 reg16; 930 931 /* 932 * When bus id is equal to 0, it might be a bad id 933 * reported by root port. 934 */ 935 if ((PCI_BUS_NUM(e_info->id) != 0) && 936 !(dev->bus->bus_flags & PCI_BUS_FLAGS_NO_AERSID)) { 937 /* Device ID match? */ 938 if (e_info->id == ((dev->bus->number << 8) | dev->devfn)) 939 return true; 940 941 /* Continue id comparing if there is no multiple error */ 942 if (!e_info->multi_error_valid) 943 return false; 944 } 945 946 /* 947 * When either 948 * 1) bus id is equal to 0. Some ports might lose the bus 949 * id of error source id; 950 * 2) bus flag PCI_BUS_FLAGS_NO_AERSID is set 951 * 3) There are multiple errors and prior ID comparing fails; 952 * We check AER status registers to find possible reporter. 
	 */
	if (atomic_read(&dev->enable_cnt) == 0)
		return false;

	/* Check if AER is enabled */
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &reg16);
	if (!(reg16 & PCI_EXP_AER_FLAGS))
		return false;

	pos = dev->aer_cap;
	if (!pos)
		return false;

	/* Check if error is recorded */
	if (e_info->severity == AER_CORRECTABLE) {
		pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &status);
		pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &mask);
	} else {
		pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
		pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &mask);
	}
	/* Any set, unmasked status bit means this device logged an error */
	if (status & ~mask)
		return true;

	return false;
}

/* pci_walk_bus() callback: collect devices that look like the error
 * source; return 1 to stop the walk, 0 to continue. */
static int find_device_iter(struct pci_dev *dev, void *data)
{
	struct aer_err_info *e_info = (struct aer_err_info *)data;

	if (is_error_source(dev, e_info)) {
		/* List this device */
		if (add_error_device(e_info, dev)) {
			/* We cannot handle more... Stop iteration */
			/* TODO: Should print error message here? */
			return 1;
		}

		/* If there is only a single error, stop iteration */
		if (!e_info->multi_error_valid)
			return 1;
	}
	return 0;
}

/**
 * find_source_device - search through device hierarchy for source device
 * @parent: pointer to Root Port pci_dev data structure
 * @e_info: including detailed error information such like id
 *
 * Return true if found.
 *
 * Invoked by DPC when error is detected at the Root Port.
 * Caller of this function must set id, severity, and multi_error_valid of
 * struct aer_err_info pointed by @e_info properly. This function must fill
 * e_info->error_dev_num and e_info->dev[], based on the given information.
1010 */ 1011 static bool find_source_device(struct pci_dev *parent, 1012 struct aer_err_info *e_info) 1013 { 1014 struct pci_dev *dev = parent; 1015 int result; 1016 1017 /* Must reset in this function */ 1018 e_info->error_dev_num = 0; 1019 1020 /* Is Root Port an agent that sends error message? */ 1021 result = find_device_iter(dev, e_info); 1022 if (result) 1023 return true; 1024 1025 pci_walk_bus(parent->subordinate, find_device_iter, e_info); 1026 1027 if (!e_info->error_dev_num) { 1028 pci_info(parent, "can't find device of ID%04x\n", e_info->id); 1029 return false; 1030 } 1031 return true; 1032 } 1033 1034 /** 1035 * handle_error_source - handle logging error into an event log 1036 * @dev: pointer to pci_dev data structure of error source device 1037 * @info: comprehensive error information 1038 * 1039 * Invoked when an error being detected by Root Port. 1040 */ 1041 static void handle_error_source(struct pci_dev *dev, struct aer_err_info *info) 1042 { 1043 int pos; 1044 1045 if (info->severity == AER_CORRECTABLE) { 1046 /* 1047 * Correctable error does not need software intervention. 1048 * No need to go through error recovery process. 
1049 */ 1050 pos = dev->aer_cap; 1051 if (pos) 1052 pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, 1053 info->status); 1054 pci_aer_clear_device_status(dev); 1055 } else if (info->severity == AER_NONFATAL) 1056 pcie_do_recovery(dev, pci_channel_io_normal, 1057 PCIE_PORT_SERVICE_AER); 1058 else if (info->severity == AER_FATAL) 1059 pcie_do_recovery(dev, pci_channel_io_frozen, 1060 PCIE_PORT_SERVICE_AER); 1061 pci_dev_put(dev); 1062 } 1063 1064 #ifdef CONFIG_ACPI_APEI_PCIEAER 1065 1066 #define AER_RECOVER_RING_ORDER 4 1067 #define AER_RECOVER_RING_SIZE (1 << AER_RECOVER_RING_ORDER) 1068 1069 struct aer_recover_entry { 1070 u8 bus; 1071 u8 devfn; 1072 u16 domain; 1073 int severity; 1074 struct aer_capability_regs *regs; 1075 }; 1076 1077 static DEFINE_KFIFO(aer_recover_ring, struct aer_recover_entry, 1078 AER_RECOVER_RING_SIZE); 1079 1080 static void aer_recover_work_func(struct work_struct *work) 1081 { 1082 struct aer_recover_entry entry; 1083 struct pci_dev *pdev; 1084 1085 while (kfifo_get(&aer_recover_ring, &entry)) { 1086 pdev = pci_get_domain_bus_and_slot(entry.domain, entry.bus, 1087 entry.devfn); 1088 if (!pdev) { 1089 pr_err("AER recover: Can not find pci_dev for %04x:%02x:%02x:%x\n", 1090 entry.domain, entry.bus, 1091 PCI_SLOT(entry.devfn), PCI_FUNC(entry.devfn)); 1092 continue; 1093 } 1094 cper_print_aer(pdev, entry.severity, entry.regs); 1095 if (entry.severity == AER_NONFATAL) 1096 pcie_do_recovery(pdev, pci_channel_io_normal, 1097 PCIE_PORT_SERVICE_AER); 1098 else if (entry.severity == AER_FATAL) 1099 pcie_do_recovery(pdev, pci_channel_io_frozen, 1100 PCIE_PORT_SERVICE_AER); 1101 pci_dev_put(pdev); 1102 } 1103 } 1104 1105 /* 1106 * Mutual exclusion for writers of aer_recover_ring, reader side don't 1107 * need lock, because there is only one reader and lock is not needed 1108 * between reader and writer. 
1109 */ 1110 static DEFINE_SPINLOCK(aer_recover_ring_lock); 1111 static DECLARE_WORK(aer_recover_work, aer_recover_work_func); 1112 1113 void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn, 1114 int severity, struct aer_capability_regs *aer_regs) 1115 { 1116 struct aer_recover_entry entry = { 1117 .bus = bus, 1118 .devfn = devfn, 1119 .domain = domain, 1120 .severity = severity, 1121 .regs = aer_regs, 1122 }; 1123 1124 if (kfifo_in_spinlocked(&aer_recover_ring, &entry, 1, 1125 &aer_recover_ring_lock)) 1126 schedule_work(&aer_recover_work); 1127 else 1128 pr_err("AER recover: Buffer overflow when recovering AER for %04x:%02x:%02x:%x\n", 1129 domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); 1130 } 1131 EXPORT_SYMBOL_GPL(aer_recover_queue); 1132 #endif 1133 1134 /** 1135 * aer_get_device_error_info - read error status from dev and store it to info 1136 * @dev: pointer to the device expected to have a error record 1137 * @info: pointer to structure to store the error record 1138 * 1139 * Return 1 on success, 0 on error. 1140 * 1141 * Note that @info is reused among all error devices. Clear fields properly. 
1142 */ 1143 int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info) 1144 { 1145 int pos, temp; 1146 1147 /* Must reset in this function */ 1148 info->status = 0; 1149 info->tlp_header_valid = 0; 1150 1151 pos = dev->aer_cap; 1152 1153 /* The device might not support AER */ 1154 if (!pos) 1155 return 0; 1156 1157 if (info->severity == AER_CORRECTABLE) { 1158 pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, 1159 &info->status); 1160 pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, 1161 &info->mask); 1162 if (!(info->status & ~info->mask)) 1163 return 0; 1164 } else if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT || 1165 pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM || 1166 info->severity == AER_NONFATAL) { 1167 1168 /* Link is still healthy for IO reads */ 1169 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, 1170 &info->status); 1171 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, 1172 &info->mask); 1173 if (!(info->status & ~info->mask)) 1174 return 0; 1175 1176 /* Get First Error Pointer */ 1177 pci_read_config_dword(dev, pos + PCI_ERR_CAP, &temp); 1178 info->first_error = PCI_ERR_CAP_FEP(temp); 1179 1180 if (info->status & AER_LOG_TLP_MASKS) { 1181 info->tlp_header_valid = 1; 1182 pci_read_config_dword(dev, 1183 pos + PCI_ERR_HEADER_LOG, &info->tlp.dw0); 1184 pci_read_config_dword(dev, 1185 pos + PCI_ERR_HEADER_LOG + 4, &info->tlp.dw1); 1186 pci_read_config_dword(dev, 1187 pos + PCI_ERR_HEADER_LOG + 8, &info->tlp.dw2); 1188 pci_read_config_dword(dev, 1189 pos + PCI_ERR_HEADER_LOG + 12, &info->tlp.dw3); 1190 } 1191 } 1192 1193 return 1; 1194 } 1195 1196 static inline void aer_process_err_devices(struct aer_err_info *e_info) 1197 { 1198 int i; 1199 1200 /* Report all before handle them, not to lost records by reset etc. 
*/ 1201 for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) { 1202 if (aer_get_device_error_info(e_info->dev[i], e_info)) 1203 aer_print_error(e_info->dev[i], e_info); 1204 } 1205 for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) { 1206 if (aer_get_device_error_info(e_info->dev[i], e_info)) 1207 handle_error_source(e_info->dev[i], e_info); 1208 } 1209 } 1210 1211 /** 1212 * aer_isr_one_error - consume an error detected by root port 1213 * @rpc: pointer to the root port which holds an error 1214 * @e_src: pointer to an error source 1215 */ 1216 static void aer_isr_one_error(struct aer_rpc *rpc, 1217 struct aer_err_source *e_src) 1218 { 1219 struct pci_dev *pdev = rpc->rpd; 1220 struct aer_err_info e_info; 1221 1222 pci_rootport_aer_stats_incr(pdev, e_src); 1223 1224 /* 1225 * There is a possibility that both correctable error and 1226 * uncorrectable error being logged. Report correctable error first. 1227 */ 1228 if (e_src->status & PCI_ERR_ROOT_COR_RCV) { 1229 e_info.id = ERR_COR_ID(e_src->id); 1230 e_info.severity = AER_CORRECTABLE; 1231 1232 if (e_src->status & PCI_ERR_ROOT_MULTI_COR_RCV) 1233 e_info.multi_error_valid = 1; 1234 else 1235 e_info.multi_error_valid = 0; 1236 aer_print_port_info(pdev, &e_info); 1237 1238 if (find_source_device(pdev, &e_info)) 1239 aer_process_err_devices(&e_info); 1240 } 1241 1242 if (e_src->status & PCI_ERR_ROOT_UNCOR_RCV) { 1243 e_info.id = ERR_UNCOR_ID(e_src->id); 1244 1245 if (e_src->status & PCI_ERR_ROOT_FATAL_RCV) 1246 e_info.severity = AER_FATAL; 1247 else 1248 e_info.severity = AER_NONFATAL; 1249 1250 if (e_src->status & PCI_ERR_ROOT_MULTI_UNCOR_RCV) 1251 e_info.multi_error_valid = 1; 1252 else 1253 e_info.multi_error_valid = 0; 1254 1255 aer_print_port_info(pdev, &e_info); 1256 1257 if (find_source_device(pdev, &e_info)) 1258 aer_process_err_devices(&e_info); 1259 } 1260 } 1261 1262 /** 1263 * aer_isr - consume errors detected by root port 1264 * @irq: IRQ assigned to Root Port 1265 * @context: pointer 
to Root Port data structure 1266 * 1267 * Invoked, as DPC, when root port records new detected error 1268 */ 1269 static irqreturn_t aer_isr(int irq, void *context) 1270 { 1271 struct pcie_device *dev = (struct pcie_device *)context; 1272 struct aer_rpc *rpc = get_service_data(dev); 1273 struct aer_err_source uninitialized_var(e_src); 1274 1275 if (kfifo_is_empty(&rpc->aer_fifo)) 1276 return IRQ_NONE; 1277 1278 while (kfifo_get(&rpc->aer_fifo, &e_src)) 1279 aer_isr_one_error(rpc, &e_src); 1280 return IRQ_HANDLED; 1281 } 1282 1283 /** 1284 * aer_irq - Root Port's ISR 1285 * @irq: IRQ assigned to Root Port 1286 * @context: pointer to Root Port data structure 1287 * 1288 * Invoked when Root Port detects AER messages. 1289 */ 1290 static irqreturn_t aer_irq(int irq, void *context) 1291 { 1292 struct pcie_device *pdev = (struct pcie_device *)context; 1293 struct aer_rpc *rpc = get_service_data(pdev); 1294 struct pci_dev *rp = rpc->rpd; 1295 struct aer_err_source e_src = {}; 1296 int pos = rp->aer_cap; 1297 1298 pci_read_config_dword(rp, pos + PCI_ERR_ROOT_STATUS, &e_src.status); 1299 if (!(e_src.status & (PCI_ERR_ROOT_UNCOR_RCV|PCI_ERR_ROOT_COR_RCV))) 1300 return IRQ_NONE; 1301 1302 pci_read_config_dword(rp, pos + PCI_ERR_ROOT_ERR_SRC, &e_src.id); 1303 pci_write_config_dword(rp, pos + PCI_ERR_ROOT_STATUS, e_src.status); 1304 1305 if (!kfifo_put(&rpc->aer_fifo, e_src)) 1306 return IRQ_HANDLED; 1307 1308 return IRQ_WAKE_THREAD; 1309 } 1310 1311 static int set_device_error_reporting(struct pci_dev *dev, void *data) 1312 { 1313 bool enable = *((bool *)data); 1314 int type = pci_pcie_type(dev); 1315 1316 if ((type == PCI_EXP_TYPE_ROOT_PORT) || 1317 (type == PCI_EXP_TYPE_UPSTREAM) || 1318 (type == PCI_EXP_TYPE_DOWNSTREAM)) { 1319 if (enable) 1320 pci_enable_pcie_error_reporting(dev); 1321 else 1322 pci_disable_pcie_error_reporting(dev); 1323 } 1324 1325 if (enable) 1326 pcie_set_ecrc_checking(dev); 1327 1328 return 0; 1329 } 1330 1331 /** 1332 * 
set_downstream_devices_error_reporting - enable/disable the error reporting bits on the root port and its downstream ports. 1333 * @dev: pointer to root port's pci_dev data structure 1334 * @enable: true = enable error reporting, false = disable error reporting. 1335 */ 1336 static void set_downstream_devices_error_reporting(struct pci_dev *dev, 1337 bool enable) 1338 { 1339 set_device_error_reporting(dev, &enable); 1340 1341 if (!dev->subordinate) 1342 return; 1343 pci_walk_bus(dev->subordinate, set_device_error_reporting, &enable); 1344 } 1345 1346 /** 1347 * aer_enable_rootport - enable Root Port's interrupts when receiving messages 1348 * @rpc: pointer to a Root Port data structure 1349 * 1350 * Invoked when PCIe bus loads AER service driver. 1351 */ 1352 static void aer_enable_rootport(struct aer_rpc *rpc) 1353 { 1354 struct pci_dev *pdev = rpc->rpd; 1355 int aer_pos; 1356 u16 reg16; 1357 u32 reg32; 1358 1359 /* Clear PCIe Capability's Device Status */ 1360 pcie_capability_read_word(pdev, PCI_EXP_DEVSTA, ®16); 1361 pcie_capability_write_word(pdev, PCI_EXP_DEVSTA, reg16); 1362 1363 /* Disable system error generation in response to error messages */ 1364 pcie_capability_clear_word(pdev, PCI_EXP_RTCTL, 1365 SYSTEM_ERROR_INTR_ON_MESG_MASK); 1366 1367 aer_pos = pdev->aer_cap; 1368 /* Clear error status */ 1369 pci_read_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, ®32); 1370 pci_write_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, reg32); 1371 pci_read_config_dword(pdev, aer_pos + PCI_ERR_COR_STATUS, ®32); 1372 pci_write_config_dword(pdev, aer_pos + PCI_ERR_COR_STATUS, reg32); 1373 pci_read_config_dword(pdev, aer_pos + PCI_ERR_UNCOR_STATUS, ®32); 1374 pci_write_config_dword(pdev, aer_pos + PCI_ERR_UNCOR_STATUS, reg32); 1375 1376 /* 1377 * Enable error reporting for the root port device and downstream port 1378 * devices. 
1379 */ 1380 set_downstream_devices_error_reporting(pdev, true); 1381 1382 /* Enable Root Port's interrupt in response to error messages */ 1383 pci_read_config_dword(pdev, aer_pos + PCI_ERR_ROOT_COMMAND, ®32); 1384 reg32 |= ROOT_PORT_INTR_ON_MESG_MASK; 1385 pci_write_config_dword(pdev, aer_pos + PCI_ERR_ROOT_COMMAND, reg32); 1386 } 1387 1388 /** 1389 * aer_disable_rootport - disable Root Port's interrupts when receiving messages 1390 * @rpc: pointer to a Root Port data structure 1391 * 1392 * Invoked when PCIe bus unloads AER service driver. 1393 */ 1394 static void aer_disable_rootport(struct aer_rpc *rpc) 1395 { 1396 struct pci_dev *pdev = rpc->rpd; 1397 u32 reg32; 1398 int pos; 1399 1400 /* 1401 * Disable error reporting for the root port device and downstream port 1402 * devices. 1403 */ 1404 set_downstream_devices_error_reporting(pdev, false); 1405 1406 pos = pdev->aer_cap; 1407 /* Disable Root's interrupt in response to error messages */ 1408 pci_read_config_dword(pdev, pos + PCI_ERR_ROOT_COMMAND, ®32); 1409 reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK; 1410 pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_COMMAND, reg32); 1411 1412 /* Clear Root's error status reg */ 1413 pci_read_config_dword(pdev, pos + PCI_ERR_ROOT_STATUS, ®32); 1414 pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_STATUS, reg32); 1415 } 1416 1417 /** 1418 * aer_remove - clean up resources 1419 * @dev: pointer to the pcie_dev data structure 1420 * 1421 * Invoked when PCI Express bus unloads or AER probe fails. 1422 */ 1423 static void aer_remove(struct pcie_device *dev) 1424 { 1425 struct aer_rpc *rpc = get_service_data(dev); 1426 1427 aer_disable_rootport(rpc); 1428 } 1429 1430 /** 1431 * aer_probe - initialize resources 1432 * @dev: pointer to the pcie_dev data structure 1433 * 1434 * Invoked when PCI Express bus loads AER service driver. 
1435 */ 1436 static int aer_probe(struct pcie_device *dev) 1437 { 1438 int status; 1439 struct aer_rpc *rpc; 1440 struct device *device = &dev->device; 1441 struct pci_dev *port = dev->port; 1442 1443 rpc = devm_kzalloc(device, sizeof(struct aer_rpc), GFP_KERNEL); 1444 if (!rpc) 1445 return -ENOMEM; 1446 1447 rpc->rpd = port; 1448 INIT_KFIFO(rpc->aer_fifo); 1449 set_service_data(dev, rpc); 1450 1451 status = devm_request_threaded_irq(device, dev->irq, aer_irq, aer_isr, 1452 IRQF_SHARED, "aerdrv", dev); 1453 if (status) { 1454 pci_err(port, "request AER IRQ %d failed\n", dev->irq); 1455 return status; 1456 } 1457 1458 aer_enable_rootport(rpc); 1459 pci_info(port, "enabled with IRQ %d\n", dev->irq); 1460 return 0; 1461 } 1462 1463 /** 1464 * aer_root_reset - reset link on Root Port 1465 * @dev: pointer to Root Port's pci_dev data structure 1466 * 1467 * Invoked by Port Bus driver when performing link reset at Root Port. 1468 */ 1469 static pci_ers_result_t aer_root_reset(struct pci_dev *dev) 1470 { 1471 u32 reg32; 1472 int pos; 1473 int rc; 1474 1475 pos = dev->aer_cap; 1476 1477 /* Disable Root's interrupt in response to error messages */ 1478 pci_read_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, ®32); 1479 reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK; 1480 pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32); 1481 1482 rc = pci_bus_error_reset(dev); 1483 pci_info(dev, "Root Port link has been reset\n"); 1484 1485 /* Clear Root Error Status */ 1486 pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, ®32); 1487 pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, reg32); 1488 1489 /* Enable Root Port's interrupt in response to error messages */ 1490 pci_read_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, ®32); 1491 reg32 |= ROOT_PORT_INTR_ON_MESG_MASK; 1492 pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32); 1493 1494 return rc ? 
PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED; 1495 } 1496 1497 static struct pcie_port_service_driver aerdriver = { 1498 .name = "aer", 1499 .port_type = PCI_EXP_TYPE_ROOT_PORT, 1500 .service = PCIE_PORT_SERVICE_AER, 1501 1502 .probe = aer_probe, 1503 .remove = aer_remove, 1504 .reset_link = aer_root_reset, 1505 }; 1506 1507 /** 1508 * aer_service_init - register AER root service driver 1509 * 1510 * Invoked when AER root service driver is loaded. 1511 */ 1512 int __init pcie_aer_init(void) 1513 { 1514 if (!pci_aer_available() || aer_acpi_firmware_first()) 1515 return -ENXIO; 1516 return pcie_port_service_register(&aerdriver); 1517 } 1518